1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2021 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
25 #include "libiberty.h"
29 #include "elf-vxworks.h"
31 #include "elf32-arm.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_table.  */
#define RELOC_SECTION(HTAB, NAME) \
  (!(HTAB)->use_rel ? ".rela" NAME : ".rel" NAME)
/* Return size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
/* Return function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
/* Return function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
/* Backend hooks for converting relocation info to HOWTOs: the REL
   variant is handled by elf32_arm_info_to_howto; the generic RELA hook
   is left unset here.  NOTE(review): presumably consumed by the generic
   ELF target machinery (elfxx-target.h) — confirm against that header.  */
#define elf_info_to_howto NULL
#define elf_info_to_howto_rel elf32_arm_info_to_howto

/* ABI version numbers for this target.  NOTE(review): presumably written
   into the ELF identification header — confirm where these are used.  */
#define ARM_ELF_ABI_VERSION 0
#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF: the place with its two
   low-order bits cleared, i.e. rounded down to 4-byte alignment.  */
#define Pa(X) ((X) & 0xfffffffc)
69 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
70 struct bfd_link_info *link_info,
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24
   HOWTO entry there.  */
78 static reloc_howto_type elf32_arm_howto_table_1[] =
81 HOWTO (R_ARM_NONE, /* type */
83 3, /* size (0 = byte, 1 = short, 2 = long) */
85 FALSE, /* pc_relative */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 FALSE, /* partial_inplace */
93 FALSE), /* pcrel_offset */
95 HOWTO (R_ARM_PC24, /* type */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
99 TRUE, /* pc_relative */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 FALSE, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 TRUE), /* pcrel_offset */
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
114 FALSE, /* pc_relative */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 FALSE, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 FALSE), /* pcrel_offset */
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
129 TRUE, /* pc_relative */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 FALSE, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 TRUE), /* pcrel_offset */
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
144 TRUE, /* pc_relative */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 FALSE, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 TRUE), /* pcrel_offset */
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
159 FALSE, /* pc_relative */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 FALSE, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 FALSE), /* pcrel_offset */
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
174 FALSE, /* pc_relative */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 FALSE, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 FALSE), /* pcrel_offset */
184 HOWTO (R_ARM_THM_ABS5, /* type */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
188 FALSE, /* pc_relative */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 FALSE, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 FALSE), /* pcrel_offset */
199 HOWTO (R_ARM_ABS8, /* type */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
203 FALSE, /* pc_relative */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 FALSE, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 FALSE), /* pcrel_offset */
213 HOWTO (R_ARM_SBREL32, /* type */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
217 FALSE, /* pc_relative */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 FALSE, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 FALSE), /* pcrel_offset */
227 HOWTO (R_ARM_THM_CALL, /* type */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
231 TRUE, /* pc_relative */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 FALSE, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 TRUE), /* pcrel_offset */
241 HOWTO (R_ARM_THM_PC8, /* type */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
245 TRUE, /* pc_relative */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 FALSE, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 TRUE), /* pcrel_offset */
255 HOWTO (R_ARM_BREL_ADJ, /* type */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
259 FALSE, /* pc_relative */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 FALSE, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 FALSE), /* pcrel_offset */
269 HOWTO (R_ARM_TLS_DESC, /* type */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
273 FALSE, /* pc_relative */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 FALSE, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 FALSE), /* pcrel_offset */
283 HOWTO (R_ARM_THM_SWI8, /* type */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
287 FALSE, /* pc_relative */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 FALSE, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 FALSE), /* pcrel_offset */
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
302 TRUE, /* pc_relative */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 FALSE, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 TRUE), /* pcrel_offset */
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
317 TRUE, /* pc_relative */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 FALSE, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 TRUE), /* pcrel_offset */
327 /* Dynamic TLS relocations. */
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
333 FALSE, /* pc_relative */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 TRUE, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 FALSE), /* pcrel_offset */
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
347 FALSE, /* pc_relative */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 TRUE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
361 FALSE, /* pc_relative */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 TRUE, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 FALSE), /* pcrel_offset */
371 /* Relocs used in ARM Linux */
373 HOWTO (R_ARM_COPY, /* type */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
377 FALSE, /* pc_relative */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 TRUE, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 FALSE), /* pcrel_offset */
387 HOWTO (R_ARM_GLOB_DAT, /* type */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
391 FALSE, /* pc_relative */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 TRUE, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 FALSE), /* pcrel_offset */
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
405 FALSE, /* pc_relative */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 TRUE, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 FALSE), /* pcrel_offset */
415 HOWTO (R_ARM_RELATIVE, /* type */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
419 FALSE, /* pc_relative */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 TRUE, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 FALSE), /* pcrel_offset */
429 HOWTO (R_ARM_GOTOFF32, /* type */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
433 FALSE, /* pc_relative */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 TRUE, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 FALSE), /* pcrel_offset */
443 HOWTO (R_ARM_GOTPC, /* type */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
447 TRUE, /* pc_relative */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 TRUE, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 TRUE), /* pcrel_offset */
457 HOWTO (R_ARM_GOT32, /* type */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
461 FALSE, /* pc_relative */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 TRUE, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 FALSE), /* pcrel_offset */
471 HOWTO (R_ARM_PLT32, /* type */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
475 TRUE, /* pc_relative */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 FALSE, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 TRUE), /* pcrel_offset */
485 HOWTO (R_ARM_CALL, /* type */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
489 TRUE, /* pc_relative */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 FALSE, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 TRUE), /* pcrel_offset */
499 HOWTO (R_ARM_JUMP24, /* type */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
503 TRUE, /* pc_relative */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 FALSE, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 TRUE), /* pcrel_offset */
513 HOWTO (R_ARM_THM_JUMP24, /* type */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
517 TRUE, /* pc_relative */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 FALSE, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 TRUE), /* pcrel_offset */
527 HOWTO (R_ARM_BASE_ABS, /* type */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
531 FALSE, /* pc_relative */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 FALSE, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 FALSE), /* pcrel_offset */
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
545 TRUE, /* pc_relative */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 FALSE, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 TRUE), /* pcrel_offset */
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
559 TRUE, /* pc_relative */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 FALSE, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 TRUE), /* pcrel_offset */
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
573 TRUE, /* pc_relative */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 FALSE, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 TRUE), /* pcrel_offset */
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
587 FALSE, /* pc_relative */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 FALSE, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 FALSE), /* pcrel_offset */
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
601 FALSE, /* pc_relative */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 FALSE, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 FALSE), /* pcrel_offset */
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
615 FALSE, /* pc_relative */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 FALSE, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 FALSE), /* pcrel_offset */
625 HOWTO (R_ARM_TARGET1, /* type */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
629 FALSE, /* pc_relative */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 FALSE, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 FALSE), /* pcrel_offset */
639 HOWTO (R_ARM_ROSEGREL32, /* type */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
643 FALSE, /* pc_relative */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 FALSE, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 FALSE), /* pcrel_offset */
653 HOWTO (R_ARM_V4BX, /* type */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
657 FALSE, /* pc_relative */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 FALSE, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 FALSE), /* pcrel_offset */
667 HOWTO (R_ARM_TARGET2, /* type */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
671 FALSE, /* pc_relative */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 FALSE, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 TRUE), /* pcrel_offset */
681 HOWTO (R_ARM_PREL31, /* type */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
685 TRUE, /* pc_relative */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 FALSE, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 TRUE), /* pcrel_offset */
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
699 FALSE, /* pc_relative */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 FALSE, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 FALSE), /* pcrel_offset */
709 HOWTO (R_ARM_MOVT_ABS, /* type */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
713 FALSE, /* pc_relative */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 FALSE, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 FALSE), /* pcrel_offset */
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
727 TRUE, /* pc_relative */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 FALSE, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 TRUE), /* pcrel_offset */
737 HOWTO (R_ARM_MOVT_PREL, /* type */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
741 TRUE, /* pc_relative */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 FALSE, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 TRUE), /* pcrel_offset */
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
755 FALSE, /* pc_relative */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 FALSE, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 FALSE), /* pcrel_offset */
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
769 FALSE, /* pc_relative */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 FALSE, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 FALSE), /* pcrel_offset */
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
783 TRUE, /* pc_relative */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 FALSE, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 TRUE), /* pcrel_offset */
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
797 TRUE, /* pc_relative */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 FALSE, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 TRUE), /* pcrel_offset */
807 HOWTO (R_ARM_THM_JUMP19, /* type */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
811 TRUE, /* pc_relative */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 FALSE, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 TRUE), /* pcrel_offset */
821 HOWTO (R_ARM_THM_JUMP6, /* type */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
825 TRUE, /* pc_relative */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 FALSE, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 TRUE), /* pcrel_offset */
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
842 TRUE, /* pc_relative */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 FALSE, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 TRUE), /* pcrel_offset */
852 HOWTO (R_ARM_THM_PC12, /* type */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
856 TRUE, /* pc_relative */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 FALSE, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 TRUE), /* pcrel_offset */
866 HOWTO (R_ARM_ABS32_NOI, /* type */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
870 FALSE, /* pc_relative */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 FALSE, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 FALSE), /* pcrel_offset */
880 HOWTO (R_ARM_REL32_NOI, /* type */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
884 TRUE, /* pc_relative */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 FALSE, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 FALSE), /* pcrel_offset */
894 /* Group relocations. */
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
900 TRUE, /* pc_relative */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 FALSE, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 TRUE), /* pcrel_offset */
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
914 TRUE, /* pc_relative */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 FALSE, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 TRUE), /* pcrel_offset */
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
928 TRUE, /* pc_relative */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 FALSE, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 TRUE), /* pcrel_offset */
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
942 TRUE, /* pc_relative */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 FALSE, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 TRUE), /* pcrel_offset */
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
956 TRUE, /* pc_relative */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 FALSE, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 TRUE), /* pcrel_offset */
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
970 TRUE, /* pc_relative */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 FALSE, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 TRUE), /* pcrel_offset */
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
984 TRUE, /* pc_relative */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 FALSE, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 TRUE), /* pcrel_offset */
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
998 TRUE, /* pc_relative */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 FALSE, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 TRUE), /* pcrel_offset */
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1012 TRUE, /* pc_relative */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 FALSE, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 TRUE), /* pcrel_offset */
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 TRUE, /* pc_relative */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 TRUE), /* pcrel_offset */
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 TRUE, /* pc_relative */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 TRUE), /* pcrel_offset */
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 TRUE, /* pc_relative */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 FALSE, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 TRUE), /* pcrel_offset */
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 TRUE, /* pc_relative */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 FALSE, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 TRUE), /* pcrel_offset */
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 TRUE, /* pc_relative */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 FALSE, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 TRUE), /* pcrel_offset */
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 TRUE, /* pc_relative */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 TRUE, /* pc_relative */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1124 TRUE, /* pc_relative */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 TRUE), /* pcrel_offset */
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1138 TRUE, /* pc_relative */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 TRUE), /* pcrel_offset */
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1152 TRUE, /* pc_relative */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 TRUE), /* pcrel_offset */
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1166 TRUE, /* pc_relative */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 FALSE, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 TRUE), /* pcrel_offset */
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1180 TRUE, /* pc_relative */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 FALSE, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 TRUE), /* pcrel_offset */
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1194 TRUE, /* pc_relative */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 FALSE, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 TRUE), /* pcrel_offset */
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1208 TRUE, /* pc_relative */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 FALSE, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 TRUE), /* pcrel_offset */
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1222 TRUE, /* pc_relative */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 FALSE, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 TRUE), /* pcrel_offset */
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1236 TRUE, /* pc_relative */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 FALSE, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 TRUE), /* pcrel_offset */
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1250 TRUE, /* pc_relative */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 FALSE, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 TRUE), /* pcrel_offset */
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1264 TRUE, /* pc_relative */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 FALSE, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 TRUE), /* pcrel_offset */
1274 /* End of group relocations. */
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1280 FALSE, /* pc_relative */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 FALSE, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 FALSE), /* pcrel_offset */
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1294 FALSE, /* pc_relative */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 FALSE, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 FALSE), /* pcrel_offset */
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1308 FALSE, /* pc_relative */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 FALSE, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 FALSE), /* pcrel_offset */
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1322 FALSE, /* pc_relative */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 FALSE, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 FALSE), /* pcrel_offset */
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1336 FALSE, /* pc_relative */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 FALSE, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 FALSE), /* pcrel_offset */
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1350 FALSE, /* pc_relative */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 FALSE, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 FALSE), /* pcrel_offset */
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 FALSE, /* pc_relative */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 TRUE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 FALSE, /* pc_relative */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 FALSE, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1392 FALSE, /* pc_relative */
1394 complain_overflow_dont,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 FALSE, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 FALSE), /* pcrel_offset */
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 FALSE, /* pc_relative */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 FALSE, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 FALSE, /* pc_relative */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 FALSE, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1434 FALSE, /* pc_relative */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 FALSE, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 FALSE), /* pcrel_offset */
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1448 TRUE, /* pc_relative */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 FALSE, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 TRUE), /* pcrel_offset */
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1462 FALSE, /* pc_relative */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 FALSE, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1476 FALSE, /* pc_relative */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 FALSE, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1493 FALSE, /* pc_relative */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 FALSE, /* partial_inplace */
1501 FALSE), /* pcrel_offset */
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 FALSE, /* pc_relative */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 FALSE, /* partial_inplace */
1516 FALSE), /* pcrel_offset */
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1522 TRUE, /* pc_relative */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 FALSE, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 TRUE), /* pcrel_offset */
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1536 TRUE, /* pc_relative */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 FALSE, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 TRUE), /* pcrel_offset */
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1551 FALSE, /* pc_relative */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1565 FALSE, /* pc_relative */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 TRUE, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1579 FALSE, /* pc_relative */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 TRUE, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1593 FALSE, /* pc_relative */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 TRUE, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1607 FALSE, /* pc_relative */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 NULL, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 TRUE, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 FALSE), /* pcrel_offset */
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1621 FALSE, /* pc_relative */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 FALSE, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 FALSE), /* pcrel_offset */
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1635 FALSE, /* pc_relative */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 FALSE, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 FALSE), /* pcrel_offset */
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1649 FALSE, /* pc_relative */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 FALSE, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 FALSE), /* pcrel_offset */
1659 /* 112-127 private relocations. */
1677 /* R_ARM_ME_TOO, obsolete. */
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1684 FALSE, /* pc_relative */
1686 complain_overflow_dont,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 FALSE, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 FALSE), /* pcrel_offset */
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1696 0, /* rightshift. */
1697 1, /* size (0 = byte, 1 = short, 2 = long). */
1699 FALSE, /* pc_relative. */
1701 complain_overflow_bitfield,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 FALSE, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 FALSE), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1709 0, /* rightshift. */
1710 1, /* size (0 = byte, 1 = short, 2 = long). */
1712 FALSE, /* pc_relative. */
1714 complain_overflow_bitfield,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 FALSE, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 FALSE), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1722 0, /* rightshift. */
1723 1, /* size (0 = byte, 1 = short, 2 = long). */
1725 FALSE, /* pc_relative. */
1727 complain_overflow_bitfield,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 FALSE, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 FALSE), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1735 0, /* rightshift. */
1736 1, /* size (0 = byte, 1 = short, 2 = long). */
1738 FALSE, /* pc_relative. */
1740 complain_overflow_bitfield,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 FALSE, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 FALSE), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16, /* type. */
1749 0, /* rightshift. */
1750 1, /* size (0 = byte, 1 = short, 2 = long). */
1752 TRUE, /* pc_relative. */
1754 complain_overflow_dont,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 FALSE, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 TRUE), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12, /* type. */
1762 0, /* rightshift. */
1763 1, /* size (0 = byte, 1 = short, 2 = long). */
1765 TRUE, /* pc_relative. */
1767 complain_overflow_dont,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 FALSE, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 TRUE), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18, /* type. */
1775 0, /* rightshift. */
1776 1, /* size (0 = byte, 1 = short, 2 = long). */
1778 TRUE, /* pc_relative. */
1780 complain_overflow_dont,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 FALSE, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 TRUE), /* pcrel_offset. */
/* Howto table for the relocation range beginning at R_ARM_IRELATIVE
   (IFUNC and FDPIC dynamic relocations).  Indexed by
   r_type - R_ARM_IRELATIVE in elf32_arm_howto_from_type.
   NOTE(review): this listing appears to have dropped lines -- the
   opening brace, several HOWTO arguments (rightshift/bitsize/bitpos)
   and some src_mask fields are not visible.  Do not edit the table
   from this copy alone; confirm against the full source.  */
1790 static reloc_howto_type elf32_arm_howto_table_2[8] =
1792 HOWTO (R_ARM_IRELATIVE, /* type */
1794 2, /* size (0 = byte, 1 = short, 2 = long) */
1796 FALSE, /* pc_relative */
1798 complain_overflow_bitfield,/* complain_on_overflow */
1799 bfd_elf_generic_reloc, /* special_function */
1800 "R_ARM_IRELATIVE", /* name */
1801 TRUE, /* partial_inplace */
1802 0xffffffff, /* src_mask */
1803 0xffffffff, /* dst_mask */
1804 FALSE), /* pcrel_offset */
1805 HOWTO (R_ARM_GOTFUNCDESC, /* type */
1807 2, /* size (0 = byte, 1 = short, 2 = long) */
1809 FALSE, /* pc_relative */
1811 complain_overflow_bitfield,/* complain_on_overflow */
1812 bfd_elf_generic_reloc, /* special_function */
1813 "R_ARM_GOTFUNCDESC", /* name */
1814 FALSE, /* partial_inplace */
1816 0xffffffff, /* dst_mask */
1817 FALSE), /* pcrel_offset */
1818 HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1820 2, /* size (0 = byte, 1 = short, 2 = long) */
1822 FALSE, /* pc_relative */
1824 complain_overflow_bitfield,/* complain_on_overflow */
1825 bfd_elf_generic_reloc, /* special_function */
1826 "R_ARM_GOTOFFFUNCDESC",/* name */
1827 FALSE, /* partial_inplace */
1829 0xffffffff, /* dst_mask */
1830 FALSE), /* pcrel_offset */
1831 HOWTO (R_ARM_FUNCDESC, /* type */
1833 2, /* size (0 = byte, 1 = short, 2 = long) */
1835 FALSE, /* pc_relative */
1837 complain_overflow_bitfield,/* complain_on_overflow */
1838 bfd_elf_generic_reloc, /* special_function */
1839 "R_ARM_FUNCDESC", /* name */
1840 FALSE, /* partial_inplace */
1842 0xffffffff, /* dst_mask */
1843 FALSE), /* pcrel_offset */
1844 HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
1846 2, /* size (0 = byte, 1 = short, 2 = long) */
1848 FALSE, /* pc_relative */
1850 complain_overflow_bitfield,/* complain_on_overflow */
1851 bfd_elf_generic_reloc, /* special_function */
1852 "R_ARM_FUNCDESC_VALUE",/* name */
1853 FALSE, /* partial_inplace */
1855 0xffffffff, /* dst_mask */
1856 FALSE), /* pcrel_offset */
1857 HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
1859 2, /* size (0 = byte, 1 = short, 2 = long) */
1861 FALSE, /* pc_relative */
1863 complain_overflow_bitfield,/* complain_on_overflow */
1864 bfd_elf_generic_reloc, /* special_function */
1865 "R_ARM_TLS_GD32_FDPIC",/* name */
1866 FALSE, /* partial_inplace */
1868 0xffffffff, /* dst_mask */
1869 FALSE), /* pcrel_offset */
1870 HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
1872 2, /* size (0 = byte, 1 = short, 2 = long) */
1874 FALSE, /* pc_relative */
1876 complain_overflow_bitfield,/* complain_on_overflow */
1877 bfd_elf_generic_reloc, /* special_function */
1878 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879 FALSE, /* partial_inplace */
1881 0xffffffff, /* dst_mask */
1882 FALSE), /* pcrel_offset */
1883 HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
1885 2, /* size (0 = byte, 1 = short, 2 = long) */
1887 FALSE, /* pc_relative */
1889 complain_overflow_bitfield,/* complain_on_overflow */
1890 bfd_elf_generic_reloc, /* special_function */
1891 "R_ARM_TLS_IE32_FDPIC",/* name */
1892 FALSE, /* partial_inplace */
1894 0xffffffff, /* dst_mask */
1895 FALSE), /* pcrel_offset */
/* Howto table for the relocation range beginning at R_ARM_RREL32;
   indexed by r_type - R_ARM_RREL32 in elf32_arm_howto_from_type.
   All four entries are "R" (relocatable-image) types kept only so the
   numbers decode; none performs a transformation here.
   NOTE(review): the opening/closing braces and some HOWTO argument
   lines are not visible in this listing.  */
1898 /* 249-255 extended, currently unused, relocations: */
1899 static reloc_howto_type elf32_arm_howto_table_3[4] =
1901 HOWTO (R_ARM_RREL32, /* type */
1903 0, /* size (0 = byte, 1 = short, 2 = long) */
1905 FALSE, /* pc_relative */
1907 complain_overflow_dont,/* complain_on_overflow */
1908 bfd_elf_generic_reloc, /* special_function */
1909 "R_ARM_RREL32", /* name */
1910 FALSE, /* partial_inplace */
1913 FALSE), /* pcrel_offset */
1915 HOWTO (R_ARM_RABS32, /* type */
1917 0, /* size (0 = byte, 1 = short, 2 = long) */
1919 FALSE, /* pc_relative */
1921 complain_overflow_dont,/* complain_on_overflow */
1922 bfd_elf_generic_reloc, /* special_function */
1923 "R_ARM_RABS32", /* name */
1924 FALSE, /* partial_inplace */
1927 FALSE), /* pcrel_offset */
1929 HOWTO (R_ARM_RPC24, /* type */
1931 0, /* size (0 = byte, 1 = short, 2 = long) */
1933 FALSE, /* pc_relative */
1935 complain_overflow_dont,/* complain_on_overflow */
1936 bfd_elf_generic_reloc, /* special_function */
1937 "R_ARM_RPC24", /* name */
1938 FALSE, /* partial_inplace */
1941 FALSE), /* pcrel_offset */
1943 HOWTO (R_ARM_RBASE, /* type */
1945 0, /* size (0 = byte, 1 = short, 2 = long) */
1947 FALSE, /* pc_relative */
1949 complain_overflow_dont,/* complain_on_overflow */
1950 bfd_elf_generic_reloc, /* special_function */
1951 "R_ARM_RBASE", /* name */
1952 FALSE, /* partial_inplace */
1955 FALSE) /* pcrel_offset */
/* Translate ELF relocation number R_TYPE into a pointer to the
   corresponding howto entry.  Types below the size of
   elf32_arm_howto_table_1 index that table directly; the contiguous
   ranges starting at R_ARM_IRELATIVE and R_ARM_RREL32 map into
   tables 2 and 3 respectively.
   NOTE(review): the opening brace and the fall-through return for an
   out-of-range type are not visible in this listing -- presumably
   NULL, since elf32_arm_info_to_howto below checks the result
   against NULL.  */
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1961 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962 return &elf32_arm_howto_table_1[r_type];
1964 if (r_type >= R_ARM_IRELATIVE
1965 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1968 if (r_type >= R_ARM_RREL32
1969 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
/* info_to_howto backend hook (wired up as elf_info_to_howto_rel in the
   file header): fill in BFD_RELOC->howto from the r_info field of
   ELF_RELOC.  On an unrecognized type, report an error against ABFD
   and set bfd_error_bad_value.
   NOTE(review): the return-type line and return statements are not
   visible in this listing -- presumably a bfd_boolean success flag;
   confirm against the full source.  */
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 Elf_Internal_Rela * elf_reloc)
1979 unsigned int r_type;
1981 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1987 bfd_set_error (bfd_error_bad_value);
/* One entry mapping a generic BFD relocation code to the
   corresponding ELF R_ARM_* relocation number.  elf_reloc_val is an
   unsigned char, so every R_ARM_* value stored must fit in 8 bits.  */
1993 struct elf32_arm_reloc_map
1995 bfd_reloc_code_real_type bfd_reloc_val;
1996 unsigned char elf_reloc_val;
/* BFD_RELOC_* -> R_ARM_* translation table, linearly searched by
   elf32_arm_reloc_type_lookup.
   NOTE(review): BFD_RELOC_ARM_PLT32 appears twice below (once next to
   BFD_RELOC_ARM_GOT32 and again after BFD_RELOC_ARM_TARGET2).  The
   duplicate is harmless -- the linear search returns on the first
   match -- but the second entry is dead and could be removed
   upstream.  */
1999 /* All entries in this list must also be present in elf32_arm_howto_table. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2002 {BFD_RELOC_NONE, R_ARM_NONE},
2003 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
2004 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
2005 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
2006 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
2007 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
2008 {BFD_RELOC_32, R_ARM_ABS32},
2009 {BFD_RELOC_32_PCREL, R_ARM_REL32},
2010 {BFD_RELOC_8, R_ARM_ABS8},
2011 {BFD_RELOC_16, R_ARM_ABS16},
2012 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
2013 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
2020 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
2021 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
2022 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
2023 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
2024 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
2025 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
2026 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
2027 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2028 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2029 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2030 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2031 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2032 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2033 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2034 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2035 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2036 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2039 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2040 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2041 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2042 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2045 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2046 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2047 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2048 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2049 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2051 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2056 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2057 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2058 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2059 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2060 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2061 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2062 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2063 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100 {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101 {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
/* reloc_type_lookup backend hook: linear-search elf32_arm_reloc_map
   and return the howto for the first entry matching CODE.
   NOTE(review): the opening brace, the declaration of `i` and the
   final NULL return for an unmatched code are not visible in this
   listing.  */
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 bfd_reloc_code_real_type code)
2110 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
/* reloc_name_lookup backend hook: case-insensitive search (via
   strcasecmp) of all three howto tables for an entry whose name
   matches r_name.
   NOTE(review): the parameter line declaring r_name, the declaration
   of `i` and the final NULL return are not visible in this listing.  */
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2123 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124 if (elf32_arm_howto_table_1[i].name != NULL
2125 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126 return &elf32_arm_howto_table_1[i];
2128 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129 if (elf32_arm_howto_table_2[i].name != NULL
2130 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131 return &elf32_arm_howto_table_2[i];
2133 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134 if (elf32_arm_howto_table_3[i].name != NULL
2135 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136 return &elf32_arm_howto_table_3[i];
2141 /* Support for core dump NOTE sections. */
/* Core-file support: parse an NT_PRSTATUS note.  A 148-byte descsz is
   the Linux/ARM 32-bit layout: signal at offset 12, lwpid at offset
   24; a ".reg" pseudosection is then created for the register dump.
   NOTE(review): the return-type line, the offset/size variable
   definitions and the default (unknown descsz) case are not visible
   in this listing.  */
2144 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2149 switch (note->descsz)
2154 case 148: /* Linux/ARM 32-bit. */
2156 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2159 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2168 /* Make a ".reg/999" section. */
2169 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2170 size, note->descpos + offset);
/* Core-file support: parse an NT_PRPSINFO note.  A 124-byte descsz is
   the Linux/ARM elf_prpsinfo layout: pid at offset 12, a 16-byte
   program name at offset 28, an 80-byte command line at offset 44.
   A single trailing space that some implementations append to the
   command line is stripped.
   NOTE(review): the return-type line and the default (unknown descsz)
   case are not visible in this listing.  */
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2176 switch (note->descsz)
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd)->core->pid
2183 = bfd_get_32 (abfd, note->descdata + 12);
2184 elf_tdata (abfd)->core->program
2185 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 elf_tdata (abfd)->core->command
2187 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some (at least one anyway)
2192 implementations, so strip it off if it exists. */
2194 char *command = elf_tdata (abfd)->core->command;
2195 int n = strlen (command);
2197 if (0 < n && command[n - 1] == ' ')
2198 command[n - 1] = '\0';
/* Backend hook to write a core note.  The visible branches build a
   124-byte NT_PRPSINFO image (program name at +28, args at +44) and
   an NT_PRSTATUS image (pid at +24, signal at +12, 72 bytes of
   general registers copied to +72), each emitted through
   elfcore_write_note with owner "CORE".  The strncpy calls are
   intentional despite the usual warnings: prpsinfo fields are
   fixed-width, zero-padded and need not be NUL-terminated (hence
   ATTRIBUTE_NONSTRING and the GCC 8 diagnostic suppression).
   NOTE(review): the switch on note_type, va_list declarations,
   va_end calls and the default return are not visible in this
   listing.  */
2205 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2215 char data[124] ATTRIBUTE_NONSTRING;
2218 va_start (ap, note_type);
2219 memset (data, 0, sizeof (data));
2220 strncpy (data + 28, va_arg (ap, const char *), 16);
2221 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2223 /* GCC 8.0 and 8.1 warn about 80 equals destination size with
2224 -Wstringop-truncation:
2225 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2227 DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2229 strncpy (data + 44, va_arg (ap, const char *), 80);
2230 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2235 return elfcore_write_note (abfd, buf, bufsiz,
2236 "CORE", note_type, data, sizeof (data));
2247 va_start (ap, note_type);
2248 memset (data, 0, sizeof (data));
2249 pid = va_arg (ap, long);
2250 bfd_put_32 (abfd, pid, data + 24);
2251 cursig = va_arg (ap, int);
2252 bfd_put_16 (abfd, cursig, data + 12);
2253 greg = va_arg (ap, const void *);
2254 memcpy (data + 72, greg, 72);
2257 return elfcore_write_note (abfd, buf, bufsiz,
2258 "CORE", note_type, data, sizeof (data));
2263 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME "elf32-littlearm"
2265 #define TARGET_BIG_SYM arm_elf32_be_vec
2266 #define TARGET_BIG_NAME "elf32-bigarm"
2268 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2272 typedef unsigned long int insn32;
2273 typedef unsigned short int insn16;
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
2277 #define INTERWORK_FLAG(abfd) \
2278 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280 || ((abfd)->flags & BFD_LINKER_CREATED))
2282 /* The linker script knows the section names for placement.
2283 The entry_names are used to do simple name mangling on the stubs.
2284 Given a function name, and its type, the stub can be found. The
2285 name can be changed. The only requirement is the %s be present. */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2301 #define STUB_ENTRY_NAME "__%s_veneer"
2303 #define CMSE_PREFIX "__acle_se_"
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
2307 /* The name of the dynamic interpreter. This is put in the .interp
2309 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2311 /* FDPIC default stack size. */
2312 #define DEFAULT_STACK_SIZE 0x8000
/* Trampoline used for TLS call relaxation: adds the offset in r0 to
   lr and branches to the routine loaded from the resulting address.  */
2314 static const unsigned long tls_trampoline [] =
2316 0xe08e0000, /* add r0, lr, r0 */
2317 0xe5901004, /* ldr r1, [r0,#4] */
2318 0xe12fff11, /* bx r1 */
/* Lazy TLS-descriptor resolver trampoline.  The two trailing data
   words are GOT-relative displacements consumed by the pc-relative
   loads above them.  */
2321 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2323 0xe52d2004, /* push {r2} */
2324 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2325 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2326 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2327 0xe081100f, /* 2: add r1, pc */
2328 0xe12fff12, /* bx r2 */
2329 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330 + dl_tlsdesc_lazy_resolver(GOT) */
2331 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm the instruction that
2336 should be used for the alignment padding should be 0xe7fd (b .-2) instead of
2337 a nop for performance reasons. */
2339 /* ARM FDPIC PLT entry. */
2340 /* The last 5 words contain PLT lazy fragment code and data. */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2343 0xe59fc008, /* ldr r12, .L1 */
/* r9 is the FDPIC GOT base register; the entry indexes the function
   descriptor relative to it.  */
2344 0xe08cc009, /* add r12, r12, r9 */
2345 0xe59c9004, /* ldr r9, [r12, #4] */
2346 0xe59cf000, /* ldr pc, [r12] */
2347 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2348 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2349 0xe51fc00c, /* ldr r12, [pc, #-12] */
2350 0xe92d1000, /* push {r12} */
2351 0xe599c004, /* ldr r12, [r9, #4] */
2352 0xe599f000, /* ldr pc, [r9] */
2355 /* Thumb FDPIC PLT entry. */
2356 /* The last 5 words contain PLT lazy fragment code and data. */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
/* NOTE(review): words are stored halfword-swapped relative to the
   Thumb-2 instruction stream (low halfword of each 32-bit insn in the
   high half of the word) -- confirm against the PLT writer.  */
2359 0xc00cf8df, /* ldr.w r12, .L1 */
2360 0x0c09eb0c, /* add.w r12, r12, r9 */
2361 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2362 0xf000f8dc, /* ldr.w pc, [r12] */
2363 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2364 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2365 0xc008f85f, /* ldr.w r12, .L2 */
2366 0xcd04f84d, /* push {r12} */
2367 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2368 0xf000f8d9, /* ldr.w pc, [r9] */
/* Two PLT layouts are supported: a legacy four-word-per-entry format
   and the default variable-length format below it.  */
2371 #ifdef FOUR_WORD_PLT
2373 /* The first entry in a procedure linkage table looks like
2374 this. It is set up so that any shared library function that is
2375 called before the relocation has been set up calls the dynamic
2377 static const bfd_vma elf32_arm_plt0_entry [] =
2379 0xe52de004, /* str lr, [sp, #-4]! */
2380 0xe59fe010, /* ldr lr, [pc, #16] */
2381 0xe08fe00e, /* add lr, pc, lr */
2382 0xe5bef008, /* ldr pc, [lr, #8]! */
2385 /* Subsequent entries in a procedure linkage table look like
2387 static const bfd_vma elf32_arm_plt_entry [] =
/* The #NN immediates are filled in with the GOT-entry displacement
   split across the three add/ldr instructions.  */
2389 0xe28fc600, /* add ip, pc, #NN */
2390 0xe28cca00, /* add ip, ip, #NN */
2391 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2392 0x00000000, /* unused */
2395 #else /* not FOUR_WORD_PLT */
2397 /* The first entry in a procedure linkage table looks like
2398 this. It is set up so that any shared library function that is
2399 called before the relocation has been set up calls the dynamic
2401 static const bfd_vma elf32_arm_plt0_entry [] =
2403 0xe52de004, /* str lr, [sp, #-4]! */
2404 0xe59fe004, /* ldr lr, [pc, #4] */
2405 0xe08fe00e, /* add lr, pc, lr */
2406 0xe5bef008, /* ldr pc, [lr, #8]! */
2407 0x00000000, /* &GOT[0] - . */
2410 /* By default subsequent entries in a procedure linkage table look like
2411 this. Offsets that don't fit into 28 bits will cause link error. */
2412 static const bfd_vma elf32_arm_plt_entry_short [] =
2414 0xe28fc600, /* add ip, pc, #0xNN00000 */
2415 0xe28cca00, /* add ip, ip, #0xNN000 */
2416 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2419 /* When explicitly asked, we'll use this "long" entry format
2420 which can cope with arbitrary displacements. */
2421 static const bfd_vma elf32_arm_plt_entry_long [] =
2423 0xe28fc200, /* add ip, pc, #0xN0000000 */
2424 0xe28cc600, /* add ip, ip, #0xNN00000 */
2425 0xe28cca00, /* add ip, ip, #0xNN000 */
2426 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
/* Selected by a command-line option; FALSE means the short format is
   used whenever the displacement fits.  */
2429 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2431 #endif /* not FOUR_WORD_PLT */
2433 /* The first entry in a procedure linkage table looks like this.
2434 It is set up so that any shared library function that is called before the
2435 relocation has been set up calls the dynamic linker first. */
2436 static const bfd_vma elf32_thumb2_plt0_entry [] =
2438 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439 an instruction maybe encoded to one or two array elements. */
2440 0xf8dfb500, /* push {lr} */
2441 0x44fee008, /* ldr.w lr, [pc, #8] */
2443 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2444 0x00000000, /* &GOT[0] - . */
2447 /* Subsequent entries in a procedure linkage table for thumb only target
2449 static const bfd_vma elf32_thumb2_plt_entry [] =
2451 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452 an instruction maybe encoded to one or two array elements. */
2453 0x0c00f240, /* movw ip, #0xNNNN */
2454 0x0c00f2c0, /* movt ip, #0xNNNN */
2455 0xf8dc44fc, /* add ip, pc */
2456 0xe7fcf000 /* ldr.w pc, [ip] */
2460 /* The format of the first entry in the procedure linkage table
2461 for a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2464 0xe52dc008, /* str ip,[sp,#-8]! */
2465 0xe59fc000, /* ldr ip,[pc] */
2466 0xe59cf008, /* ldr pc,[ip,#8] */
2467 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2470 /* The format of subsequent entries in a VxWorks executable. */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2473 0xe59fc000, /* ldr ip,[pc] */
2474 0xe59cf000, /* ldr pc,[ip] */
2475 0x00000000, /* .long @got */
2476 0xe59fc000, /* ldr ip,[pc] */
2477 0xea000000, /* b _PLT */
2478 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2481 /* The format of entries in a VxWorks shared library. */
/* r9 holds the GOT pointer in the VxWorks shared-library model.  */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2484 0xe59fc000, /* ldr ip,[pc] */
2485 0xe79cf009, /* ldr pc,[ip,r9] */
2486 0x00000000, /* .long @got */
2487 0xe59fc000, /* ldr ip,[pc] */
2488 0xe599f008, /* ldr pc,[r9,#8] */
2489 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2492 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2500 /* The first entry in a procedure linkage table looks like
2501 this. It is set up so that any shared library function that is
2502 called before the relocation has been set up calls the dynamic
/* Native Client (NaCl) PLT header: instructions are grouped into
   16-byte bundles and indirect branch targets are masked with bic to
   satisfy the sandbox.  */
2504 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2507 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2508 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2509 0xe08cc00f, /* add ip, ip, pc */
2510 0xe52dc008, /* str ip, [sp, #-8]! */
2511 /* Second bundle: */
2512 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2513 0xe59cc000, /* ldr ip, [ip] */
2514 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2515 0xe12fff1c, /* bx ip */
2517 0xe320f000, /* nop */
2518 0xe320f000, /* nop */
2519 0xe320f000, /* nop */
2521 0xe50dc004, /* str ip, [sp, #-4] */
2522 /* Fourth bundle: */
2523 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2524 0xe59cc000, /* ldr ip, [ip] */
2525 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2526 0xe12fff1c, /* bx ip */
/* Byte offset of the shared ".Lplt_tail" code within the header.  */
2528 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2530 /* Subsequent entries in a procedure linkage table look like this. */
2531 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2533 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2534 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2535 0xe08cc00f, /* add ip, ip, pc */
2536 0xea000000, /* b .Lplt_tail */
/* Maximum branch reach for ARM, Thumb-1 and Thumb-2 encodings; the
   +8 / +4 terms account for the pipeline offset of the PC.  */
2539 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2540 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2541 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2542 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2543 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2544 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2545 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2546 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
/* Helpers building insn_sequence entries: each records an encoded
   instruction (or data word), its type, the relocation to apply, and
   an extra argument (addend or condition flag).  */
2556 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2557 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2558 is inserted in arm_build_one_stub(). */
2559 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2560 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2561 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2562 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2563 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2564 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2565 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2566 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2571 enum stub_insn_type type;
2572 unsigned int r_type;
2576 /* See note [Thumb nop sequence] when adding a veneer. */
2578 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2579 to reach the stub if necessary. */
2580 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2582 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2583 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2586 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2588 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2590 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2591 ARM_INSN (0xe12fff1c), /* bx ip */
2592 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2595 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2596 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2598 THUMB16_INSN (0xb401), /* push {r0} */
2599 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2600 THUMB16_INSN (0x4684), /* mov ip, r0 */
2601 THUMB16_INSN (0xbc01), /* pop {r0} */
2602 THUMB16_INSN (0x4760), /* bx ip */
2603 THUMB16_INSN (0xbf00), /* nop */
2604 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2607 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2608 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2610 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2611 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2614 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2615 M-profile architectures. */
/* PureCode sections may not contain literal pools, hence the
   movw/movt pair instead of a data word.  */
2616 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2618 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2619 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2620 THUMB16_INSN (0x4760), /* bx ip */
2623 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2625 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
/* "bx pc; b .-2" switches from Thumb to ARM state so the ARM-encoded
   tail can run.  */
2627 THUMB16_INSN (0x4778), /* bx pc */
2628 THUMB16_INSN (0xe7fd), /* b .-2 */
2629 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2630 ARM_INSN (0xe12fff1c), /* bx ip */
2631 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2634 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2636 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2638 THUMB16_INSN (0x4778), /* bx pc */
2639 THUMB16_INSN (0xe7fd), /* b .-2 */
2640 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2641 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2644 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2645 one, when the destination is close enough. */
2646 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2648 THUMB16_INSN (0x4778), /* bx pc */
2649 THUMB16_INSN (0xe7fd), /* b .-2 */
2650 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2653 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2654 blx to reach the stub if necessary. */
2655 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2657 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2658 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2659 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2662 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2663 blx to reach the stub if necessary. We can not add into pc;
2664 it is not guaranteed to mode switch (different in ARMv6 and
2666 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2668 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2669 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2670 ARM_INSN (0xe12fff1c), /* bx ip */
2671 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2674 /* V4T ARM -> Thumb long branch stub, PIC. */
2675 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2677 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2678 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2679 ARM_INSN (0xe12fff1c), /* bx ip */
2680 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2683 /* V4T Thumb -> ARM long branch stub, PIC. */
2684 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2686 THUMB16_INSN (0x4778), /* bx pc */
2687 THUMB16_INSN (0xe7fd), /* b .-2 */
2688 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2689 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2690 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2693 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2695 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2697 THUMB16_INSN (0xb401), /* push {r0} */
2698 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2699 THUMB16_INSN (0x46fc), /* mov ip, pc */
2700 THUMB16_INSN (0x4484), /* add ip, r0 */
2701 THUMB16_INSN (0xbc01), /* pop {r0} */
2702 THUMB16_INSN (0x4760), /* bx ip */
2703 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2706 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2708 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2710 THUMB16_INSN (0x4778), /* bx pc */
2711 THUMB16_INSN (0xe7fd), /* b .-2 */
2712 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2713 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2714 ARM_INSN (0xe12fff1c), /* bx ip */
2715 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2718 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2719 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2720 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2722 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2723 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2724 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2727 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2728 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2729 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2731 THUMB16_INSN (0x4778), /* bx pc */
2732 THUMB16_INSN (0xe7fd), /* b .-2 */
2733 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2734 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2735 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2738 /* NaCl ARM -> ARM long branch stub. */
/* Padded with bkpt/data words to fill a complete 16-byte NaCl
   bundle.  */
2739 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2741 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2742 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2743 ARM_INSN (0xe12fff1c), /* bx ip */
2744 ARM_INSN (0xe320f000), /* nop */
2745 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2746 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2747 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2748 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2751 /* NaCl ARM -> ARM long branch stub, PIC. */
2752 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2754 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2755 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2756 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2757 ARM_INSN (0xe12fff1c), /* bx ip */
2758 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2759 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2760 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2761 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2764 /* Stub used for transition to secure state (aka SG veneer). */
2765 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2767 THUMB32_INSN (0xe97fe97f), /* sg. */
2768 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2772 /* Cortex-A8 erratum-workaround stubs. */
2774 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2775 can't use a conditional branch to reach this stub). */
2777 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2779 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2780 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2781 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2784 /* Stub used for b.w and bl.w instructions. */
2786 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2788 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2791 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2793 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2796 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2797 instruction (which switches to ARM mode) to point to this stub. Jump to the
2798 real destination using an ARM-mode branch. */
2800 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2802 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2805 /* For each section group there can be a specially created linker section
2806 to hold the stubs for that group. The name of the stub section is based
2807 upon the name of another section within that group with the suffix below
2810 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2811 create what appeared to be a linker stub section when it actually
2812 contained user code/data. For example, consider this fragment:
2814 const char * stubborn_problems[] = { "np" };
2816 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2819 .data.rel.local.stubborn_problems
2821 This then causes problems in arm32_arm_build_stubs() as it triggers:
2823 // Ignore non-stub sections.
2824 if (!strstr (stub_sec->name, STUB_SUFFIX))
2827 And so the section would be ignored instead of being processed. Hence
2828 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2830 #define STUB_SUFFIX ".__stub"
2832 /* One entry per long/short branch stub defined above. */
/* X-macro list: DEF_STUBS is expanded twice below -- once to build
   the enum of stub types and once to build the stub_definitions
   table -- so the two stay in sync automatically.  */
2834 DEF_STUB(long_branch_any_any) \
2835 DEF_STUB(long_branch_v4t_arm_thumb) \
2836 DEF_STUB(long_branch_thumb_only) \
2837 DEF_STUB(long_branch_v4t_thumb_thumb) \
2838 DEF_STUB(long_branch_v4t_thumb_arm) \
2839 DEF_STUB(short_branch_v4t_thumb_arm) \
2840 DEF_STUB(long_branch_any_arm_pic) \
2841 DEF_STUB(long_branch_any_thumb_pic) \
2842 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2843 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2844 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2845 DEF_STUB(long_branch_thumb_only_pic) \
2846 DEF_STUB(long_branch_any_tls_pic) \
2847 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2848 DEF_STUB(long_branch_arm_nacl) \
2849 DEF_STUB(long_branch_arm_nacl_pic) \
2850 DEF_STUB(cmse_branch_thumb_only) \
2851 DEF_STUB(a8_veneer_b_cond) \
2852 DEF_STUB(a8_veneer_b) \
2853 DEF_STUB(a8_veneer_bl) \
2854 DEF_STUB(a8_veneer_blx) \
2855 DEF_STUB(long_branch_thumb2_only) \
2856 DEF_STUB(long_branch_thumb2_only_pure)
/* First expansion: one enumerator per stub, named arm_stub_<x>.  */
2858 #define DEF_STUB(x) arm_stub_##x,
2859 enum elf32_arm_stub_type
2867 /* Note the first a8_veneer type. */
2868 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
/* Template pointer/length pair for one stub kind (struct opening is
   elided in this view).  */
2872 const insn_sequence* template_sequence;
/* Second expansion: {template, length} initializer per stub.  */
2876 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2877 static const stub_def stub_definitions[] =
/* One entry in the stub hash table: everything needed to build one
   branch veneer and to patch the branch that reaches it.  */
2883 struct elf32_arm_stub_hash_entry
2885 /* Base hash table entry structure. */
2886 struct bfd_hash_entry root;
2888 /* The stub section. */
2891 /* Offset within stub_sec of the beginning of this stub. */
2892 bfd_vma stub_offset;
2894 /* Given the symbol's value and its section we can determine its final
2895 value when building the stubs (so the stub knows where to jump). */
2896 bfd_vma target_value;
2897 asection *target_section;
2899 /* Same as above but for the source of the branch to the stub. Used for
2900 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2901 such, source section does not need to be recorded since Cortex-A8 erratum
2902 workaround stubs are only generated when both source and target are in the
2904 bfd_vma source_value;
2906 /* The instruction which caused this stub to be generated (only valid for
2907 Cortex-A8 erratum workaround stubs at present). */
2908 unsigned long orig_insn;
2910 /* The stub type. */
2911 enum elf32_arm_stub_type stub_type;
2912 /* Its encoding size in bytes. */
/* Template (insn_sequence array) the stub is built from.  */
2915 const insn_sequence *stub_template;
2916 /* The size of the template (number of entries). */
2917 int stub_template_size;
2919 /* The symbol table entry, if any, that this was derived from. */
2920 struct elf32_arm_link_hash_entry *h;
2922 /* Type of branch. */
2923 enum arm_st_branch_type branch_type;
2925 /* Where this stub is being called from, or, in the case of combined
2926 stub sections, the first input section in the group. */
2929 /* The name for the local symbol at the start of this stub. The
2930 stub name in the hash table has to be unique; this does not, so
2931 it can be friendlier. */
2935 /* Used to build a map of a section. This is required for mixed-endian
2938 typedef struct elf32_elf_section_map
2943 elf32_arm_section_map;
2945 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2949 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2950 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2951 VFP11_ERRATUM_ARM_VENEER,
2952 VFP11_ERRATUM_THUMB_VENEER
2954 elf32_vfp11_erratum_type;
/* Singly-linked list node describing one VFP11 erratum site: either a
   veneer or the branch to it (union members elided in this view).  */
2956 typedef struct elf32_vfp11_erratum_list
2958 struct elf32_vfp11_erratum_list *next;
2964 struct elf32_vfp11_erratum_list *veneer;
2965 unsigned int vfp_insn;
2969 struct elf32_vfp11_erratum_list *branch;
2973 elf32_vfp11_erratum_type type;
2975 elf32_vfp11_erratum_list;
2977 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2981 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2982 STM32L4XX_ERRATUM_VENEER
2984 elf32_stm32l4xx_erratum_type;
2986 typedef struct elf32_stm32l4xx_erratum_list
2988 struct elf32_stm32l4xx_erratum_list *next;
2994 struct elf32_stm32l4xx_erratum_list *veneer;
2999 struct elf32_stm32l4xx_erratum_list *branch;
3003 elf32_stm32l4xx_erratum_type type;
3005 elf32_stm32l4xx_erratum_list;
/* Kinds of edit applied to an .ARM.exidx unwind table (other
   enumerators elided in this view).  */
3010 INSERT_EXIDX_CANTUNWIND_AT_END
3012 arm_unwind_edit_type;
3014 /* A (sorted) list of edits to apply to an unwind table. */
3015 typedef struct arm_unwind_table_edit
3017 arm_unwind_edit_type type;
3018 /* Note: we sometimes want to insert an unwind entry corresponding to a
3019 section different from the one we're currently writing out, so record the
3020 (text) section this edit relates to here. */
3021 asection *linked_section;
3023 struct arm_unwind_table_edit *next;
3025 arm_unwind_table_edit;
/* ARM-specific extension of the generic ELF per-section data: mapping
   symbols, erratum records and unwind-table edits.  */
3027 typedef struct _arm_elf_section_data
3029 /* Information about mapping symbols. */
3030 struct bfd_elf_section_data elf;
3031 unsigned int mapcount;
3032 unsigned int mapsize;
3033 elf32_arm_section_map *map;
3034 /* Information about CPU errata. */
3035 unsigned int erratumcount;
3036 elf32_vfp11_erratum_list *erratumlist;
3037 unsigned int stm32l4xx_erratumcount;
3038 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3039 unsigned int additional_reloc_count;
3040 /* Information about unwind tables. */
3043 /* Unwind info attached to a text section. */
3046 asection *arm_exidx_sec;
3049 /* Unwind info attached to an .ARM.exidx section. */
3052 arm_unwind_table_edit *unwind_edit_list;
3053 arm_unwind_table_edit *unwind_edit_tail;
3057 _arm_elf_section_data;
/* Downcast a section's elf_section_data to the ARM-specific form.  */
3059 #define elf32_arm_section_data(sec) \
3060 ((_arm_elf_section_data *) elf_section_data (sec))
3062 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3063 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3064 so may be created multiple times: we use an array of these entries whilst
3065 relaxing which we can refresh easily, then create stubs for each potentially
3066 erratum-triggering instruction once we've settled on a solution. */
3068 struct a8_erratum_fix
3073 bfd_vma target_offset;
3074 unsigned long orig_insn;
3076 enum elf32_arm_stub_type stub_type;
3077 enum arm_st_branch_type branch_type;
3080 /* A table of relocs applied to branches which might trigger Cortex-A8
3083 struct a8_erratum_reloc
3086 bfd_vma destination;
3087 struct elf32_arm_link_hash_entry *hash;
3088 const char *sym_name;
3089 unsigned int r_type;
3090 enum arm_st_branch_type branch_type;
/* TRUE when the branch already goes via a (non-A8) stub, so no extra
   A8 fix is needed for it.  */
3091 bfd_boolean non_a8_stub;
3094 /* The size of the thread control block. */
3097 /* ARM-specific information about a PLT entry, over and above the usual
3101 /* We reference count Thumb references to a PLT entry separately,
3102 so that we can emit the Thumb trampoline only if needed. */
3103 bfd_signed_vma thumb_refcount;
3105 /* Some references from Thumb code may be eliminated by BL->BLX
3106 conversion, so record them separately. */
3107 bfd_signed_vma maybe_thumb_refcount;
3109 /* How many of the recorded PLT accesses were from non-call relocations.
3110 This information is useful when deciding whether anything takes the
3111 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3112 non-call references to the function should resolve directly to the
3113 real runtime target. */
3114 unsigned int noncall_refcount;
3116 /* Since PLT entries have variable size if the Thumb prologue is
3117 used, we need to record the index into .got.plt instead of
3118 recomputing it from the PLT offset. */
3119 bfd_signed_vma got_offset;
3122 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3123 struct arm_local_iplt_info
3125 /* The information that is usually found in the generic ELF part of
3126 the hash table entry. */
3127 union gotplt_union root;
3129 /* The information that is usually found in the ARM-specific part of
3130 the hash table entry. */
3131 struct arm_plt_info arm;
3133 /* A list of all potential dynamic relocations against this symbol. */
3134 struct elf_dyn_relocs *dyn_relocs;
3137 /* Structure to handle FDPIC support for local functions. */
/* Per-local-symbol counters/offsets for FDPIC function-descriptor
   relocations (counterpart of struct fdpic_global below).  */
3138 struct fdpic_local {
3139 unsigned int funcdesc_cnt;
3140 unsigned int gotofffuncdesc_cnt;
3141 int funcdesc_offset;
/* ARM-specific extension of the per-BFD ELF object data.  */
3144 struct elf_arm_obj_tdata
3146 struct elf_obj_tdata root;
3148 /* tls_type for each local got entry. */
3149 char *local_got_tls_type;
3151 /* GOTPLT entries for TLS descriptors. */
3152 bfd_vma *local_tlsdesc_gotent;
3154 /* Information for local symbols that need entries in .iplt. */
3155 struct arm_local_iplt_info **local_iplt;
3157 /* Zero to warn when linking objects with incompatible enum sizes. */
3158 int no_enum_size_warning;
3160 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3161 int no_wchar_size_warning;
3163 /* Maintains FDPIC counters and funcdesc info. */
3164 struct fdpic_local *local_fdpic_cnts;
/* Accessors for the ARM tdata and its per-local-symbol arrays.  */
3167 #define elf_arm_tdata(bfd) \
3168 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3170 #define elf32_arm_local_got_tls_type(bfd) \
3171 (elf_arm_tdata (bfd)->local_got_tls_type)
3173 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3174 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3176 #define elf32_arm_local_iplt(bfd) \
3177 (elf_arm_tdata (bfd)->local_iplt)
3179 #define elf32_arm_local_fdpic_cnts(bfd) \
3180 (elf_arm_tdata (bfd)->local_fdpic_cnts)
/* TRUE when BFD is an ARM ELF object whose tdata was allocated by
   this backend (guards the casts above).  */
3182 #define is_arm_elf(bfd) \
3183 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3184 && elf_tdata (bfd) != NULL \
3185 && elf_object_id (bfd) == ARM_ELF_DATA)
/* Allocate the ARM-sized tdata for a fresh object (return type line
   elided in this view).  */
3188 elf32_arm_mkobject (bfd *abfd)
3190 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3194 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3196 /* Structure to handle FDPIC support for extern functions. */
/* Per-global-symbol counters/offsets for FDPIC function-descriptor
   relocations (counterpart of struct fdpic_local above).  */
3197 struct fdpic_global {
3198 unsigned int gotofffuncdesc_cnt;
3199 unsigned int gotfuncdesc_cnt;
3200 unsigned int funcdesc_cnt;
3201 int funcdesc_offset;
3202 int gotfuncdesc_offset;
3205 /* Arm ELF linker hash entry. */
3206 struct elf32_arm_link_hash_entry
3208 struct elf_link_hash_entry root;
3210 /* ARM-specific PLT information. */
3211 struct arm_plt_info plt;
/* Bit flags recorded in tls_type below; GD and GDESC may be set
   simultaneously, hence the ANY_P predicate.  */
3213 #define GOT_UNKNOWN 0
3214 #define GOT_NORMAL 1
3215 #define GOT_TLS_GD 2
3216 #define GOT_TLS_IE 4
3217 #define GOT_TLS_GDESC 8
3218 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3219 unsigned int tls_type : 8;
3221 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3222 unsigned int is_iplt : 1;
3224 unsigned int unused : 23;
3226 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3227 starting at the end of the jump table. */
3228 bfd_vma tlsdesc_got;
3230 /* The symbol marking the real symbol location for exported thumb
3231 symbols with Arm stubs. */
3232 struct elf_link_hash_entry *export_glue;
3234 /* A pointer to the most recently used stub hash entry against this
3236 struct elf32_arm_stub_hash_entry *stub_cache;
3238 /* Counter for FDPIC relocations against this symbol. */
3239 struct fdpic_global fdpic_cnts;
3242 /* Traverse an arm ELF linker hash table. */
3243 #define elf32_arm_link_hash_traverse(table, func, info) \
3244 (elf_link_hash_traverse \
3246 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
3249 /* Get the ARM elf linker hash table from a link_info structure. */
/* Returns NULL when the hash table is not the ARM backend's, so
   callers must check.  */
3250 #define elf32_arm_hash_table(p) \
3251 ((is_elf_hash_table ((p)->hash) \
3252 && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA) \
3253 ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)
3255 #define arm_stub_hash_lookup(table, string, create, copy) \
3256 ((struct elf32_arm_stub_hash_entry *) \
3257 bfd_hash_lookup ((table), (string), (create), (copy)))
3259 /* Array to keep track of which stub sections have been created, and
3260 information on stub grouping. */
3263 /* This is the section to which stubs in the group will be
3266 /* The stub section. */
/* Each TLS descriptor occupies one 4-byte jump-table slot.  */
3270 #define elf32_arm_compute_jump_table_size(htab) \
3271 ((htab)->next_tls_desc_index * 4)
3273 /* ARM ELF linker hash table. */
3274 struct elf32_arm_link_hash_table
3276 /* The main hash table. */
3277 struct elf_link_hash_table root;
3279 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3280 bfd_size_type thumb_glue_size;
3282 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3283 bfd_size_type arm_glue_size;
3285 /* The size in bytes of section containing the ARMv4 BX veneers. */
3286 bfd_size_type bx_glue_size;
3288 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3289 veneer has been populated. */
3290 bfd_vma bx_glue_offset[15];
3292 /* The size in bytes of the section containing glue for VFP11 erratum
3294 bfd_size_type vfp11_erratum_glue_size;
3296 /* The size in bytes of the section containing glue for STM32L4XX erratum
3298 bfd_size_type stm32l4xx_erratum_glue_size;
3300 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3301 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3302 elf32_arm_write_section(). */
3303 struct a8_erratum_fix *a8_erratum_fixes;
3304 unsigned int num_a8_erratum_fixes;
3306 /* An arbitrary input BFD chosen to hold the glue sections. */
3307 bfd * bfd_of_glue_owner;
3309 /* Nonzero to output a BE8 image. */
3312 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3313 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3316 /* The relocation to use for R_ARM_TARGET2 relocations. */
3319 /* 0 = Ignore R_ARM_V4BX.
3320 1 = Convert BX to MOV PC.
3321 2 = Generate v4 interworking stubs. */
3324 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3327 /* Whether we should fix the ARM1176 BLX immediate issue. */
3330 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3333 /* What sort of code sequences we should look for which may trigger the
3334 VFP11 denorm erratum. */
3335 bfd_arm_vfp11_fix vfp11_fix;
3337 /* Global counter for the number of fixes we have emitted. */
3338 int num_vfp11_fixes;
3340 /* What sort of code sequences we should look for which may trigger the
3341 STM32L4XX erratum. */
3342 bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3344 /* Global counter for the number of fixes we have emitted. */
3345 int num_stm32l4xx_fixes;
3347 /* Nonzero to force PIC branch veneers. */
3350 /* The number of bytes in the initial entry in the PLT. */
3351 bfd_size_type plt_header_size;
3353 /* The number of bytes in the subsequent PLT entries. */
3354 bfd_size_type plt_entry_size;
3356 /* True if the target uses REL relocations. */
3357 bfd_boolean use_rel;
3359 /* Nonzero if import library must be a secure gateway import library
3360 as per ARMv8-M Security Extensions. */
3363 /* The import library whose symbols' address must remain stable in
3364 the import library generated. */
3367 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3368 bfd_vma next_tls_desc_index;
3370 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3371 bfd_vma num_tls_desc;
3373 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3376 /* Offset in .plt section of tls_arm_trampoline. */
3377 bfd_vma tls_trampoline;
3379 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3382 bfd_signed_vma refcount;
3386 /* For convenience in allocate_dynrelocs. */
3389 /* The amount of space used by the reserved portion of the sgotplt
3390 section, plus whatever space is used by the jump slots. */
3391 bfd_vma sgotplt_jump_table_size;
3393 /* The stub hash table. */
3394 struct bfd_hash_table stub_hash_table;
3396 /* Linker stub bfd. */
3399 /* Linker call-backs. */
3400 asection * (*add_stub_section) (const char *, asection *, asection *,
3402 void (*layout_sections_again) (void);
3404 /* Array to keep track of which stub sections have been created, and
3405 information on stub grouping. */
3406 struct map_stub *stub_group;
3408 /* Input stub section holding secure gateway veneers. */
3409 asection *cmse_stub_sec;
3411 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3412 start to be allocated. */
3413 bfd_vma new_cmse_stub_offset;
3415 /* Number of elements in stub_group. */
3416 unsigned int top_id;
3418 /* Assorted information used by elf32_arm_size_stubs. */
3419 unsigned int bfd_count;
3420 unsigned int top_index;
3421 asection **input_list;
3423 /* True if the target system uses FDPIC. */
3426 /* Fixup section. Used for FDPIC. */
3430 /* Add an FDPIC read-only fixup. */
/* Appends OFFSET to the next free 4-byte slot of the .rofixup section
   SROFIXUP.  SROFIXUP->reloc_count is reused as a running counter of
   emitted fixups, so each call consumes one 32-bit slot.
   NOTE(review): some surrounding lines are elided in this view.  */
3432 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3434 bfd_vma fixup_offset;
3436 fixup_offset = srofixup->reloc_count++ * 4;
/* Guard against writing past the space reserved for .rofixup.  */
3437 BFD_ASSERT (fixup_offset < srofixup->size);
3438 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
/* Count trailing zero bits of MASK.
   NOTE(review): the non-GCC fallback loop body and the function's
   return type line are elided in this view; presumably the loop scans
   bits from the LSB upward — confirm against the full source.
   __builtin_ctz is undefined for MASK == 0, so callers are expected to
   pass a non-zero mask.  */
3442 ctz (unsigned int mask)
/* Use the compiler intrinsic when available (GCC >= 3.4).  */
3444 #if GCC_VERSION >= 3004
3445 return __builtin_ctz (mask);
/* Portable fallback: iterate over every bit position of MASK.  */
3449 for (i = 0; i < 8 * sizeof (mask); i++)
/* Count the number of set bits in MASK (population count).
   NOTE(review): the fallback loop body and the return type line are
   elided in this view.  */
3460 elf32_arm_popcount (unsigned int mask)
/* Use the compiler intrinsic when available (GCC >= 3.4).  */
3462 #if GCC_VERSION >= 3004
3463 return __builtin_popcount (mask);
/* Portable fallback: examine each bit of MASK in turn.  */
3468 for (i = 0; i < 8 * sizeof (mask); i++)
3478 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3479 asection *sreloc, Elf_Internal_Rela *rel);
/* Fill in an FDPIC function descriptor (two words: entry address and
   GOT/segment value) in the .got section at the descriptor's offset,
   unless it has already been filled in.  The low bit of
   *FUNCDESC_OFFSET is used as the "already filled" marker and is set
   on completion.  For PIC links a R_ARM_FUNCDESC_VALUE dynamic
   relocation is emitted instead of resolving the value statically;
   otherwise the resolved values are written and read-only fixups are
   recorded so the loader can relocate them.
   NOTE(review): several parameter and brace lines are elided in this
   view; parameter roles are inferred from their uses below.  */
3482 arm_elf_fill_funcdesc(bfd *output_bfd,
3483 struct bfd_link_info *info,
3484 int *funcdesc_offset,
3488 bfd_vma dynreloc_value,
/* Only fill the descriptor once; bit 0 records completion.  */
3491 if ((*funcdesc_offset & 1) == 0)
3493 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3494 asection *sgot = globals->root.sgot;
3496 if (bfd_link_pic(info))
3498 asection *srelgot = globals->root.srelgot;
3499 Elf_Internal_Rela outrel;
/* PIC: let the dynamic linker compute the descriptor via a
   R_ARM_FUNCDESC_VALUE relocation against the GOT slot.  */
3501 outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3502 outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3503 outrel.r_addend = 0;
3505 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3506 bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3507 bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
/* Non-PIC: resolve the descriptor statically.  The second word is
   the link-time value of the GOT symbol (_GLOBAL_OFFSET_TABLE_).  */
3511 struct elf_link_hash_entry *hgot = globals->root.hgot;
3512 bfd_vma got_value = hgot->root.u.def.value
3513 + hgot->root.u.def.section->output_section->vma
3514 + hgot->root.u.def.section->output_offset;
/* Record rofixups so the FDPIC loader can relocate both words.  */
3516 arm_elf_add_rofixup(output_bfd, globals->srofixup,
3517 sgot->output_section->vma + sgot->output_offset
3519 arm_elf_add_rofixup(output_bfd, globals->srofixup,
3520 sgot->output_section->vma + sgot->output_offset
3522 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3523 bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
/* Mark this descriptor as filled.  */
3525 *funcdesc_offset |= 1;
3529 /* Create an entry in an ARM ELF linker hash table. */
/* bfd_hash newfunc callback: allocates (if needed) and initialises the
   ARM-specific fields of a hash entry, after delegating the generic
   ELF initialisation to _bfd_elf_link_hash_newfunc.  */
3531 static struct bfd_hash_entry *
3532 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3533 struct bfd_hash_table * table,
3534 const char * string)
3536 struct elf32_arm_link_hash_entry * ret =
3537 (struct elf32_arm_link_hash_entry *) entry;
3539 /* Allocate the structure if it has not already been allocated by a
3542 ret = (struct elf32_arm_link_hash_entry *)
3543 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
/* Allocation failed: return the (NULL) entry unchanged.  */
3545 return (struct bfd_hash_entry *) ret;
3547 /* Call the allocation method of the superclass. */
3548 ret = ((struct elf32_arm_link_hash_entry *)
3549 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
/* Initialise the ARM-specific state: GOT/TLS bookkeeping, PLT
   reference counters, interworking glue, stub cache and FDPIC
   relocation counters.  Offsets use -1 as "not yet assigned".  */
3553 ret->tls_type = GOT_UNKNOWN;
3554 ret->tlsdesc_got = (bfd_vma) -1;
3555 ret->plt.thumb_refcount = 0;
3556 ret->plt.maybe_thumb_refcount = 0;
3557 ret->plt.noncall_refcount = 0;
3558 ret->plt.got_offset = -1;
3559 ret->is_iplt = FALSE;
3560 ret->export_glue = NULL;
3562 ret->stub_cache = NULL;
3564 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3565 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3566 ret->fdpic_cnts.funcdesc_cnt = 0;
3567 ret->fdpic_cnts.funcdesc_offset = -1;
3568 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3571 return (struct bfd_hash_entry *) ret;
3574 /* Ensure that we have allocated bookkeeping structures for ABFD's local
/* Lazily allocates, as ONE zero-initialised bfd_zalloc block, all the
   per-local-symbol arrays for ABFD (GOT refcounts, TLSDESC GOT
   entries, iplt info pointers, FDPIC counters, GOT TLS types), then
   carves the block up.  Presence of elf_local_got_refcounts is used
   as the "already allocated" test.
   NOTE(review): return statements and some declarations are elided in
   this view; presumably it returns a boolean success flag.  */
3578 elf32_arm_allocate_local_sym_info (bfd *abfd)
3580 if (elf_local_got_refcounts (abfd) == NULL)
3582 bfd_size_type num_syms;
/* sh_info of the symtab header is the number of local symbols.  */
3586 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3587 size = num_syms * (sizeof (bfd_signed_vma)
3589 + sizeof (struct arm_local_iplt_info *)
3590 + sizeof (struct fdpic_local)
3592 data = bfd_zalloc (abfd, size);
3596 /* It is important that these all be allocated in descending
3597 order of required alignment, so that arrays allocated later
3598 will be sufficiently aligned. */
3599 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3600 data += num_syms * sizeof (bfd_signed_vma);
3602 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3603 data += num_syms * sizeof (bfd_vma);
3605 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3606 data += num_syms * sizeof (struct arm_local_iplt_info *);
3608 elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3609 data += num_syms * sizeof (struct fdpic_local);
3611 elf32_arm_local_got_tls_type (abfd) = data;
/* Compile-time-ish sanity checks that the carving order above really
   is in descending alignment (GCC-only __alignof__).  */
3612 #if GCC_VERSION >= 3000
3613 BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd))
3614 <= __alignof__ (*elf_local_got_refcounts (abfd)));
3615 BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd))
3616 <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd)));
3617 BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd))
3618 <= __alignof__ (*elf32_arm_local_iplt (abfd)));
3619 BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd))
3620 <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd)));
3626 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3627 to input bfd ABFD. Create the information if it doesn't already exist.
3628 Return null if an allocation fails. */
3630 static struct arm_local_iplt_info *
3631 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3633 struct arm_local_iplt_info **ptr;
/* Make sure the per-local-symbol arrays exist first.  */
3635 if (!elf32_arm_allocate_local_sym_info (abfd))
/* R_SYMNDX must index a local symbol (sh_info bounds the locals).  */
3638 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3639 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
/* Allocate the zeroed iplt record on first use.
   NOTE(review): the surrounding null-check/return lines are elided in
   this view.  */
3641 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3645 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3646 in ABFD's symbol table. If the symbol is global, H points to its
3647 hash table entry, otherwise H is null.
3649 Return true if the symbol does have PLT information. When returning
3650 true, point *ROOT_PLT at the target-independent reference count/offset
3651 union and *ARM_PLT at the ARM-specific information. */
3654 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3655 struct elf32_arm_link_hash_entry *h,
3656 unsigned long r_symndx, union gotplt_union **root_plt,
3657 struct arm_plt_info **arm_plt)
3659 struct arm_local_iplt_info *local_iplt;
/* No PLT-like section exists at all, so no symbol can have PLT info.  */
3661 if (globals->root.splt == NULL && globals->root.iplt == NULL)
/* Global symbol: the generic hash entry carries the plt union.
   NOTE(review): the branch structure around this line is elided.  */
3666 *root_plt = &h->root.plt;
/* Local symbol: look it up in the per-bfd iplt array, which may not
   have been allocated or populated for this symbol.  */
3671 if (elf32_arm_local_iplt (abfd) == NULL)
3674 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3675 if (local_iplt == NULL)
3678 *root_plt = &local_iplt->root;
3679 *arm_plt = &local_iplt->arm;
3683 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3685 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
/* A Thumb stub is needed when the target is not Thumb-only and either
   there are definite Thumb references, or there are possible Thumb
   references and the BLX instruction is unavailable (so the ARM-mode
   PLT entry cannot be reached directly from Thumb code).  */
3689 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3690 struct arm_plt_info *arm_plt)
3692 struct elf32_arm_link_hash_table *htab;
3694 htab = elf32_arm_hash_table (info);
3696 return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3697 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3700 /* Return a pointer to the head of the dynamic reloc list that should
3701 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3702 ABFD's symbol table. Return null if an error occurs. */
3704 static struct elf_dyn_relocs **
3705 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3706 Elf_Internal_Sym *isym)
/* Local ifuncs get their dynrelocs hung off their iplt record.  */
3708 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3710 struct arm_local_iplt_info *local_iplt;
3712 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3713 if (local_iplt == NULL)
3715 return &local_iplt->dyn_relocs;
3719 /* Track dynamic relocs needed for local syms too.
3720 We really need local syms available to do this
/* Ordinary local symbols: store the list on the elf_section_data of
   the section the symbol is defined in.  */
3725 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3729 vpp = &elf_section_data (s)->local_dynrel;
3730 return (struct elf_dyn_relocs **) vpp;
3734 /* Initialize an entry in the stub hash table. */
/* bfd_hash newfunc for the stub table: allocate the entry if needed,
   let the base class initialise it, then reset the ARM stub fields to
   their "unassigned" values.  */
3736 static struct bfd_hash_entry *
3737 stub_hash_newfunc (struct bfd_hash_entry *entry,
3738 struct bfd_hash_table *table,
3741 /* Allocate the structure if it has not already been allocated by a
3745 entry = (struct bfd_hash_entry *)
3746 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3751 /* Call the allocation method of the superclass. */
3752 entry = bfd_hash_newfunc (entry, table, string);
3755 struct elf32_arm_stub_hash_entry *eh;
3757 /* Initialize the local fields. */
3758 eh = (struct elf32_arm_stub_hash_entry *) entry;
3759 eh->stub_sec = NULL;
/* (bfd_vma) -1 marks "offset not yet assigned".  */
3760 eh->stub_offset = (bfd_vma) -1;
3761 eh->source_value = 0;
3762 eh->target_value = 0;
3763 eh->target_section = NULL;
3765 eh->stub_type = arm_stub_none;
3767 eh->stub_template = NULL;
3768 eh->stub_template_size = -1;
3771 eh->output_name = NULL;
3777 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3778 shortcuts to them in our hash table. */
3781 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3783 struct elf32_arm_link_hash_table *htab;
3785 htab = elf32_arm_hash_table (info);
/* Generic ELF code creates the GOT sections proper.  */
3789 if (! _bfd_elf_create_got_section (dynobj, info))
3792 /* Also create .rofixup. */
/* .rofixup holds FDPIC read-only fixups; flags make it loaded,
   linker-created and read-only.  Alignment 2 => 4-byte aligned.
   NOTE(review): the guard selecting when this runs (presumably
   fdpic-only) is elided in this view.  */
3795 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3796 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3797 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3798 if (htab->srofixup == NULL
3799 || !bfd_set_section_alignment (htab->srofixup, 2))
3806 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
/* Each section is created only if not already present, with flags and
   alignment taken from the backend data of the dynobj.  */
3809 create_ifunc_sections (struct bfd_link_info *info)
3811 struct elf32_arm_link_hash_table *htab;
3812 const struct elf_backend_data *bed;
3817 htab = elf32_arm_hash_table (info);
3818 dynobj = htab->root.dynobj;
3819 bed = get_elf_backend_data (dynobj);
3820 flags = bed->dynamic_sec_flags;
/* .iplt: executable, read-only code section for ifunc PLT entries.  */
3822 if (htab->root.iplt == NULL)
3824 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3825 flags | SEC_READONLY | SEC_CODE);
3827 || !bfd_set_section_alignment (s, bed->plt_alignment))
3829 htab->root.iplt = s;
/* .rel(a).iplt: relocations for the ifunc PLT (name depends on
   REL vs RELA, see RELOC_SECTION).  */
3832 if (htab->root.irelplt == NULL)
3834 s = bfd_make_section_anyway_with_flags (dynobj,
3835 RELOC_SECTION (htab, ".iplt"),
3836 flags | SEC_READONLY)
3838 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3840 htab->root.irelplt = s;
/* .igot.plt: GOT entries referenced by the ifunc PLT.  */
3843 if (htab->root.igotplt == NULL)
3845 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3847 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3849 htab->root.igotplt = s;
3854 /* Determine if we're dealing with a Thumb only architecture. */
/* Prefer the explicit Tag_CPU_arch_profile build attribute ('M' means
   M-profile, i.e. Thumb-only); otherwise fall back to matching known
   Thumb-only Tag_CPU_arch values.  Reads attributes from
   globals->obfd.  */
3857 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3860 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3861 Tag_CPU_arch_profile);
3864 return profile == 'M';
3866 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3868 /* Force return logic to be reviewed for each new architecture. */
3869 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
/* All M-profile (Thumb-only) architecture revisions.  */
3871 if (arch == TAG_CPU_ARCH_V6_M
3872 || arch == TAG_CPU_ARCH_V6S_M
3873 || arch == TAG_CPU_ARCH_V7E_M
3874 || arch == TAG_CPU_ARCH_V8M_BASE
3875 || arch == TAG_CPU_ARCH_V8M_MAIN
3876 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3882 /* Determine if we're dealing with a Thumb-2 object. */
/* Prefer the explicit Tag_THUMB_ISA_use attribute (2 == Thumb-2);
   otherwise match the Tag_CPU_arch values known to imply Thumb-2.  */
3885 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3888 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3892 return thumb_isa == 2;
3894 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3896 /* Force return logic to be reviewed for each new architecture. */
3897 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
/* Architectures that include the Thumb-2 instruction set.  */
3899 return (arch == TAG_CPU_ARCH_V6T2
3900 || arch == TAG_CPU_ARCH_V7
3901 || arch == TAG_CPU_ARCH_V7E_M
3902 || arch == TAG_CPU_ARCH_V8
3903 || arch == TAG_CPU_ARCH_V8R
3904 || arch == TAG_CPU_ARCH_V8M_MAIN
3905 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3908 /* Determine whether Thumb-2 BL instruction is available. */
/* True for ARMv6T2 and every v7-or-later architecture; architectures
   introduced after v6T2 but below v7 (e.g. ARMv6-M) lack it.  */
3911 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3914 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3916 /* Force return logic to be reviewed for each new architecture. */
3917 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3919 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3920 return (arch == TAG_CPU_ARCH_V6T2
3921 || arch >= TAG_CPU_ARCH_V7);
3924 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3925 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
/* After creating the sections via the generic helpers, this also fixes
   up target-specific PLT geometry (VxWorks, Thumb-only, FDPIC) in the
   hash table, since plt_header_size/plt_entry_size depend on which
   PLT template will be used.  */
3929 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3931 struct elf32_arm_link_hash_table *htab;
3933 htab = elf32_arm_hash_table (info);
3937 if (!htab->root.sgot && !create_got_section (dynobj, info))
3940 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
/* VxWorks: extra .rela.plt.unloaded section plus its own PLT sizes;
   shared links use a headerless PLT.  */
3943 if (htab->root.target_os == is_vxworks)
3945 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3948 if (bfd_link_pic (info))
3950 htab->plt_header_size = 0;
3951 htab->plt_entry_size
3952 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3956 htab->plt_header_size
3957 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3958 htab->plt_entry_size
3959 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3962 if (elf_elfheader (dynobj))
3963 elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3968 Test for thumb only architectures. Note - we cannot just call
3969 using_thumb_only() as the attributes in the output bfd have not been
3970 initialised at this point, so instead we use the input bfd. */
/* Temporarily point htab->obfd at dynobj so using_thumb_only() reads
   the input bfd's attributes; restored immediately afterwards.  */
3971 bfd * saved_obfd = htab->obfd;
3973 htab->obfd = dynobj;
3974 if (using_thumb_only (htab))
3976 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3977 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3979 htab->obfd = saved_obfd;
/* FDPIC: headerless PLT; with BIND_NOW the lazy-resolution tail
   (5 words) of each PLT entry is dropped.  */
3982 if (htab->fdpic_p) {
3983 htab->plt_header_size = 0;
3984 if (info->flags & DF_BIND_NOW)
3985 htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
3987 htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
/* Final sanity check: all required shortcuts must now exist
   (.rel(a).bss is only needed for non-PIC links).  */
3990 if (!htab->root.splt
3991 || !htab->root.srelplt
3992 || !htab->root.sdynbss
3993 || (!bfd_link_pic (info) && !htab->root.srelbss))
3999 /* Copy the extra info we tack onto an elf_link_hash_entry. */
/* Merges the ARM-specific counters from the indirect symbol IND into
   the symbol DIR it now points at, zeroing them in IND so they are not
   counted twice, then delegates to the generic ELF copy routine.  */
4002 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
4003 struct elf_link_hash_entry *dir,
4004 struct elf_link_hash_entry *ind)
4006 struct elf32_arm_link_hash_entry *edir, *eind;
4008 edir = (struct elf32_arm_link_hash_entry *) dir;
4009 eind = (struct elf32_arm_link_hash_entry *) ind;
4011 if (ind->root.type == bfd_link_hash_indirect)
4013 /* Copy over PLT info. */
4014 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4015 eind->plt.thumb_refcount = 0;
4016 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4017 eind->plt.maybe_thumb_refcount = 0;
4018 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4019 eind->plt.noncall_refcount = 0;
4021 /* Copy FDPIC counters. */
4022 edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4023 edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4024 edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4026 /* We should only allocate a function to .iplt once the final
4027 symbol information is known. */
4028 BFD_ASSERT (!eind->is_iplt);
/* Transfer the GOT TLS type only if DIR has no GOT references of
   its own yet.  */
4030 if (dir->got.refcount <= 0)
4032 edir->tls_type = eind->tls_type;
4033 eind->tls_type = GOT_UNKNOWN;
4037 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4040 /* Destroy an ARM elf linker hash table. */
/* hash_table_free callback installed by the table's creator: releases
   the stub hash table, then the generic ELF hash table.  */
4043 elf32_arm_link_hash_table_free (bfd *obfd)
4045 struct elf32_arm_link_hash_table *ret
4046 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4048 bfd_hash_table_free (&ret->stub_hash_table);
4049 _bfd_elf_link_hash_table_free (obfd);
4052 /* Create an ARM elf linker hash table. */
/* Allocates and initialises the table: generic ELF init with the
   ARM-specific entry newfunc, default erratum-fix and PLT settings,
   the stub hash table, and the custom free routine.  Returns NULL on
   allocation failure.  */
4054 static struct bfd_link_hash_table *
4055 elf32_arm_link_hash_table_create (bfd *abfd)
4057 struct elf32_arm_link_hash_table *ret;
4058 size_t amt = sizeof (struct elf32_arm_link_hash_table);
/* bfd_zmalloc zeroes the table, so unmentioned fields start as 0.  */
4060 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4064 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4065 elf32_arm_link_hash_newfunc,
4066 sizeof (struct elf32_arm_link_hash_entry),
4073 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4074 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
/* Default PLT geometry depends on the compile-time PLT layout.  */
4075 #ifdef FOUR_WORD_PLT
4076 ret->plt_header_size = 16;
4077 ret->plt_entry_size = 16;
4079 ret->plt_header_size = 20;
4080 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
/* ARM defaults to REL relocations (see RELOC_SECTION/RELOC_SIZE).  */
4082 ret->use_rel = TRUE;
4086 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4087 sizeof (struct elf32_arm_stub_hash_entry)))
4089 _bfd_elf_link_hash_table_free (abfd);
/* Ensure the stub table is freed together with the main table.  */
4092 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4094 return &ret->root.root;
4097 /* Determine what kind of NOPs are available. */
/* True when the architecture (from the Tag_CPU_arch attribute of the
   output bfd) has a real ARM-state NOP instruction.  */
4100 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4102 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4105 /* Force return logic to be reviewed for each new architecture. */
4106 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
4108 return (arch == TAG_CPU_ARCH_V6T2
4109 || arch == TAG_CPU_ARCH_V6K
4110 || arch == TAG_CPU_ARCH_V7
4111 || arch == TAG_CPU_ARCH_V8
4112 || arch == TAG_CPU_ARCH_V8R);
/* Return whether the stub kind STUB_TYPE starts with Thumb (rather
   than ARM) instructions.  The listed cases are the Thumb-entry stub
   kinds.
   NOTE(review): the switch header, default case and return lines are
   elided in this view.  */
4116 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4120 case arm_stub_long_branch_thumb_only:
4121 case arm_stub_long_branch_thumb2_only:
4122 case arm_stub_long_branch_thumb2_only_pure:
4123 case arm_stub_long_branch_v4t_thumb_arm:
4124 case arm_stub_short_branch_v4t_thumb_arm:
4125 case arm_stub_long_branch_v4t_thumb_arm_pic:
4126 case arm_stub_long_branch_v4t_thumb_tls_pic:
4127 case arm_stub_long_branch_thumb_only_pic:
4128 case arm_stub_cmse_branch_thumb_only:
4139 /* Determine the type of stub needed, if any, for a call. */
4141 static enum elf32_arm_stub_type
4142 arm_type_of_stub (struct bfd_link_info *info,
4143 asection *input_sec,
4144 const Elf_Internal_Rela *rel,
4145 unsigned char st_type,
4146 enum arm_st_branch_type *actual_branch_type,
4147 struct elf32_arm_link_hash_entry *hash,
4148 bfd_vma destination,
4154 bfd_signed_vma branch_offset;
4155 unsigned int r_type;
4156 struct elf32_arm_link_hash_table * globals;
4157 bfd_boolean thumb2, thumb2_bl, thumb_only;
4158 enum elf32_arm_stub_type stub_type = arm_stub_none;
4160 enum arm_st_branch_type branch_type = *actual_branch_type;
4161 union gotplt_union *root_plt;
4162 struct arm_plt_info *arm_plt;
4166 if (branch_type == ST_BRANCH_LONG)
4169 globals = elf32_arm_hash_table (info);
4170 if (globals == NULL)
4173 thumb_only = using_thumb_only (globals);
4174 thumb2 = using_thumb2 (globals);
4175 thumb2_bl = using_thumb2_bl (globals);
4177 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4179 /* True for architectures that implement the thumb2 movw instruction. */
4180 thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);
4182 /* Determine where the call point is. */
4183 location = (input_sec->output_offset
4184 + input_sec->output_section->vma
4187 r_type = ELF32_R_TYPE (rel->r_info);
4189 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4190 are considering a function call relocation. */
4191 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4192 || r_type == R_ARM_THM_JUMP19)
4193 && branch_type == ST_BRANCH_TO_ARM)
4194 branch_type = ST_BRANCH_TO_THUMB;
4196 /* For TLS call relocs, it is the caller's responsibility to provide
4197 the address of the appropriate trampoline. */
4198 if (r_type != R_ARM_TLS_CALL
4199 && r_type != R_ARM_THM_TLS_CALL
4200 && elf32_arm_get_plt_info (input_bfd, globals, hash,
4201 ELF32_R_SYM (rel->r_info), &root_plt,
4203 && root_plt->offset != (bfd_vma) -1)
4207 if (hash == NULL || hash->is_iplt)
4208 splt = globals->root.iplt;
4210 splt = globals->root.splt;
4215 /* Note when dealing with PLT entries: the main PLT stub is in
4216 ARM mode, so if the branch is in Thumb mode, another
4217 Thumb->ARM stub will be inserted later just before the ARM
4218 PLT stub. If a long branch stub is needed, we'll add a
4219 Thumb->Arm one and branch directly to the ARM PLT entry.
4220 Here, we have to check if a pre-PLT Thumb->ARM stub
4221 is needed and if it will be close enough. */
4223 destination = (splt->output_section->vma
4224 + splt->output_offset
4225 + root_plt->offset);
4228 /* Thumb branch/call to PLT: it can become a branch to ARM
4229 or to Thumb. We must perform the same checks and
4230 corrections as in elf32_arm_final_link_relocate. */
4231 if ((r_type == R_ARM_THM_CALL)
4232 || (r_type == R_ARM_THM_JUMP24))
4234 if (globals->use_blx
4235 && r_type == R_ARM_THM_CALL
4238 /* If the Thumb BLX instruction is available, convert
4239 the BL to a BLX instruction to call the ARM-mode
4241 branch_type = ST_BRANCH_TO_ARM;
4246 /* Target the Thumb stub before the ARM PLT entry. */
4247 destination -= PLT_THUMB_STUB_SIZE;
4248 branch_type = ST_BRANCH_TO_THUMB;
4253 branch_type = ST_BRANCH_TO_ARM;
4257 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4258 BFD_ASSERT (st_type != STT_GNU_IFUNC);
4260 branch_offset = (bfd_signed_vma)(destination - location);
4262 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4263 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4265 /* Handle cases where:
4266 - this call goes too far (different Thumb/Thumb2 max
4268 - it's a Thumb->Arm call and blx is not available, or it's a
4269 Thumb->Arm branch (not bl). A stub is needed in this case,
4270 but only if this call is not through a PLT entry. Indeed,
4271 PLT stubs handle mode switching already. */
4273 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4274 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4276 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4277 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4279 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4280 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4281 && (r_type == R_ARM_THM_JUMP19))
4282 || (branch_type == ST_BRANCH_TO_ARM
4283 && (((r_type == R_ARM_THM_CALL
4284 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4285 || (r_type == R_ARM_THM_JUMP24)
4286 || (r_type == R_ARM_THM_JUMP19))
4289 /* If we need to insert a Thumb-Thumb long branch stub to a
4290 PLT, use one that branches directly to the ARM PLT
4291 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4292 stub, undo this now. */
4293 if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4295 branch_type = ST_BRANCH_TO_ARM;
4296 branch_offset += PLT_THUMB_STUB_SIZE;
4299 if (branch_type == ST_BRANCH_TO_THUMB)
4301 /* Thumb to thumb. */
4304 if (input_sec->flags & SEC_ELF_PURECODE)
4306 (_("%pB(%pA): warning: long branch veneers used in"
4307 " section with SHF_ARM_PURECODE section"
4308 " attribute is only supported for M-profile"
4309 " targets that implement the movw instruction"),
4310 input_bfd, input_sec);
4312 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4314 ? ((globals->use_blx
4315 && (r_type == R_ARM_THM_CALL))
4316 /* V5T and above. Stub starts with ARM code, so
4317 we must be able to switch mode before
4318 reaching it, which is only possible for 'bl'
4319 (ie R_ARM_THM_CALL relocation). */
4320 ? arm_stub_long_branch_any_thumb_pic
4321 /* On V4T, use Thumb code only. */
4322 : arm_stub_long_branch_v4t_thumb_thumb_pic)
4324 /* non-PIC stubs. */
4325 : ((globals->use_blx
4326 && (r_type == R_ARM_THM_CALL))
4327 /* V5T and above. */
4328 ? arm_stub_long_branch_any_any
4330 : arm_stub_long_branch_v4t_thumb_thumb);
4334 if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4335 stub_type = arm_stub_long_branch_thumb2_only_pure;
4338 if (input_sec->flags & SEC_ELF_PURECODE)
4340 (_("%pB(%pA): warning: long branch veneers used in"
4341 " section with SHF_ARM_PURECODE section"
4342 " attribute is only supported for M-profile"
4343 " targets that implement the movw instruction"),
4344 input_bfd, input_sec);
4346 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4348 ? arm_stub_long_branch_thumb_only_pic
4350 : (thumb2 ? arm_stub_long_branch_thumb2_only
4351 : arm_stub_long_branch_thumb_only);
4357 if (input_sec->flags & SEC_ELF_PURECODE)
4359 (_("%pB(%pA): warning: long branch veneers used in"
4360 " section with SHF_ARM_PURECODE section"
4361 " attribute is only supported" " for M-profile"
4362 " targets that implement the movw instruction"),
4363 input_bfd, input_sec);
4367 && sym_sec->owner != NULL
4368 && !INTERWORK_FLAG (sym_sec->owner))
4371 (_("%pB(%s): warning: interworking not enabled;"
4372 " first occurrence: %pB: %s call to %s"),
4373 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4377 (bfd_link_pic (info) | globals->pic_veneer)
4379 ? (r_type == R_ARM_THM_TLS_CALL
4380 /* TLS PIC stubs. */
4381 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4382 : arm_stub_long_branch_v4t_thumb_tls_pic)
4383 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4384 /* V5T PIC and above. */
4385 ? arm_stub_long_branch_any_arm_pic
4387 : arm_stub_long_branch_v4t_thumb_arm_pic))
4389 /* non-PIC stubs. */
4390 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4391 /* V5T and above. */
4392 ? arm_stub_long_branch_any_any
4394 : arm_stub_long_branch_v4t_thumb_arm);
4396 /* Handle v4t short branches. */
4397 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4398 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4399 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4400 stub_type = arm_stub_short_branch_v4t_thumb_arm;
4404 else if (r_type == R_ARM_CALL
4405 || r_type == R_ARM_JUMP24
4406 || r_type == R_ARM_PLT32
4407 || r_type == R_ARM_TLS_CALL)
4409 if (input_sec->flags & SEC_ELF_PURECODE)
4411 (_("%pB(%pA): warning: long branch veneers used in"
4412 " section with SHF_ARM_PURECODE section"
4413 " attribute is only supported for M-profile"
4414 " targets that implement the movw instruction"),
4415 input_bfd, input_sec);
4416 if (branch_type == ST_BRANCH_TO_THUMB)
4421 && sym_sec->owner != NULL
4422 && !INTERWORK_FLAG (sym_sec->owner))
4425 (_("%pB(%s): warning: interworking not enabled;"
4426 " first occurrence: %pB: %s call to %s"),
4427 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4430 /* We have an extra 2-bytes reach because of
4431 the mode change (bit 24 (H) of BLX encoding). */
4432 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4433 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4434 || (r_type == R_ARM_CALL && !globals->use_blx)
4435 || (r_type == R_ARM_JUMP24)
4436 || (r_type == R_ARM_PLT32))
4438 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4440 ? ((globals->use_blx)
4441 /* V5T and above. */
4442 ? arm_stub_long_branch_any_thumb_pic
4444 : arm_stub_long_branch_v4t_arm_thumb_pic)
4446 /* non-PIC stubs. */
4447 : ((globals->use_blx)
4448 /* V5T and above. */
4449 ? arm_stub_long_branch_any_any
4451 : arm_stub_long_branch_v4t_arm_thumb);
4457 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4458 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4461 (bfd_link_pic (info) | globals->pic_veneer)
4463 ? (r_type == R_ARM_TLS_CALL
4465 ? arm_stub_long_branch_any_tls_pic
4466 : (globals->root.target_os == is_nacl
4467 ? arm_stub_long_branch_arm_nacl_pic
4468 : arm_stub_long_branch_any_arm_pic))
4469 /* non-PIC stubs. */
4470 : (globals->root.target_os == is_nacl
4471 ? arm_stub_long_branch_arm_nacl
4472 : arm_stub_long_branch_any_any);
4477 /* If a stub is needed, record the actual destination type. */
4478 if (stub_type != arm_stub_none)
4479 *actual_branch_type = branch_type;
4484 /* Build a name for an entry in the stub hash table. */
4487 elf32_arm_stub_name (const asection *input_section,
4488 const asection *sym_sec,
4489 const struct elf32_arm_link_hash_entry *hash,
4490 const Elf_Internal_Rela *rel,
4491 enum elf32_arm_stub_type stub_type)
/* Named-symbol case: "<input-section-id>_<symbol-name>+<addend>_<stub-type>",
   as shown by the sprintf format below.  Length budget: 8 hex digits for the
   section id, separators, the symbol name, 8 for the addend, 2 for the stub
   type, plus the NUL.  */
4498 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4499 stub_name = (char *) bfd_malloc (len);
4500 if (stub_name != NULL)
4501 sprintf (stub_name, "%08x_%s+%x_%d",
4502 input_section->id & 0xffffffff,
4503 hash->root.root.root.string,
4504 (int) rel->r_addend & 0xffffffff,
/* Section-local case:
   "<input-section-id>_<sym-sec-id>:<sym-index>+<addend>_<stub-type>".
   TLS call relocations (R_ARM_TLS_CALL / R_ARM_THM_TLS_CALL) force the
   symbol index component to 0, so all such calls can share one stub.  */
4509 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4510 stub_name = (char *) bfd_malloc (len);
4511 if (stub_name != NULL)
4512 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4513 input_section->id & 0xffffffff,
4514 sym_sec->id & 0xffffffff,
4515 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4516 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4517 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4518 (int) rel->r_addend & 0xffffffff,
4525 /* Look up an entry in the stub hash. Stub entries are cached because
4526 creating the stub name takes a bit of time. */
4528 static struct elf32_arm_stub_hash_entry *
4529 elf32_arm_get_stub_entry (const asection *input_section,
4530 const asection *sym_sec,
4531 struct elf_link_hash_entry *hash,
4532 const Elf_Internal_Rela *rel,
4533 struct elf32_arm_link_hash_table *htab,
4534 enum elf32_arm_stub_type stub_type)
4536 struct elf32_arm_stub_hash_entry *stub_entry;
4537 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4538 const asection *id_sec;
/* Stubs are only meaningful for branches out of executable sections.  */
4540 if ((input_section->flags & SEC_CODE) == 0)
4543 /* If the input section is the CMSE stubs one and it needs a long
4544 branch stub to reach it's final destination, give up with an
4545 error message: this is not supported. See PR ld/24709. */
4546 if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen(CMSE_STUB_NAME)))
4548 bfd *output_bfd = htab->obfd;
4549 asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);
4551 _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4552 "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
4554 (uint64_t)out_sec->output_section->vma
4555 + out_sec->output_offset,
4556 (uint64_t)sym_sec->output_section->vma
4557 + sym_sec->output_offset
4558 + h->root.root.u.def.value);
4559 /* Exit, rather than leave incompletely processed
4564 /* If this input section is part of a group of sections sharing one
4565 stub section, then use the id of the first section in the group.
4566 Stub names need to include a section id, as there may well be
4567 more than one stub used to reach say, printf, and we need to
4568 distinguish between them. */
4569 BFD_ASSERT (input_section->id <= htab->top_id);
4570 id_sec = htab->stub_group[input_section->id].link_sec;
/* Fast path: the per-symbol one-entry cache avoids rebuilding the stub
   name when the same (symbol, group, stub type) is looked up repeatedly.  */
4572 if (h != NULL && h->stub_cache != NULL
4573 && h->stub_cache->h == h
4574 && h->stub_cache->id_sec == id_sec
4575 && h->stub_cache->stub_type == stub_type)
4577 stub_entry = h->stub_cache;
/* Slow path: build the name and do a real hash lookup (no creation here:
   FALSE, FALSE), then refresh the cache.  */
4583 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4584 if (stub_name == NULL)
4587 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4588 stub_name, FALSE, FALSE);
4590 h->stub_cache = stub_entry;
4598 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4602 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4604 if (stub_type >= max_stub_type)
4605 abort (); /* Should be unreachable. */
/* Only Secure Gateway (CMSE) veneers need their own output section; all
   other stub types are interspersed with input sections.  */
4609 case arm_stub_cmse_branch_thumb_only:
4616 abort (); /* Should be unreachable. */
4619 /* Required alignment (as a power of 2) for the dedicated section holding
4620 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4621 with input sections. */
4624 arm_dedicated_stub_output_section_required_alignment
4625 (enum elf32_arm_stub_type stub_type)
4627 if (stub_type >= max_stub_type)
4628 abort (); /* Should be unreachable. */
4632 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4634 case arm_stub_cmse_branch_thumb_only:
/* Non-dedicated stub types must not reach here with a dedicated-section
   requirement; assert that invariant before falling through.  */
4638 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4642 abort (); /* Should be unreachable. */
4645 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4646 NULL if veneers of this type are interspersed with input sections. */
4649 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4651 if (stub_type >= max_stub_type)
4652 abort (); /* Should be unreachable. */
/* CMSE veneers go into the fixed-name section CMSE_STUB_NAME.  */
4656 case arm_stub_cmse_branch_thumb_only:
4657 return CMSE_STUB_NAME;
4660 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4664 abort (); /* Should be unreachable. */
4667 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4668 returns the address of the hash table field in HTAB holding a pointer to the
4669 corresponding input section. Otherwise, returns NULL. */
4672 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4673 enum elf32_arm_stub_type stub_type)
4675 if (stub_type >= max_stub_type)
4676 abort (); /* Should be unreachable. */
/* Only the CMSE veneer type has a dedicated input-section slot in HTAB.  */
4680 case arm_stub_cmse_branch_thumb_only:
4681 return &htab->cmse_stub_sec;
4684 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4688 abort (); /* Should be unreachable. */
4691 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4692 is the section that branch into veneer and can be NULL if stub should go in
4693 a dedicated output section. Returns a pointer to the stub section, and the
4694 section to which the stub section will be attached (in *LINK_SEC_P).
4695 LINK_SEC_P may be NULL. */
4698 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4699 struct elf32_arm_link_hash_table *htab,
4700 enum elf32_arm_stub_type stub_type)
4702 asection *link_sec, *out_sec, **stub_sec_p;
4703 const char *stub_sec_prefix;
4704 bfd_boolean dedicated_output_section =
4705 arm_dedicated_stub_output_section_required (stub_type);
/* Dedicated case (e.g. CMSE): the output section must already exist in the
   output BFD — typically placed there by the linker script.  */
4708 if (dedicated_output_section)
4710 bfd *output_bfd = htab->obfd;
4711 const char *out_sec_name =
4712 arm_dedicated_stub_output_section_name (stub_type);
4714 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4715 stub_sec_prefix = out_sec_name;
4716 align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4717 out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4718 if (out_sec == NULL)
4720 _bfd_error_handler (_("no address assigned to the veneers output "
4721 "section %s"), out_sec_name);
/* Non-dedicated case: attach the stub section to the head of the input
   section's stub group, falling back to the group leader's slot when the
   member has none of its own.  */
4727 BFD_ASSERT (section->id <= htab->top_id);
4728 link_sec = htab->stub_group[section->id].link_sec;
4729 BFD_ASSERT (link_sec != NULL);
4730 stub_sec_p = &htab->stub_group[section->id].stub_sec;
4731 if (*stub_sec_p == NULL)
4732 stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4733 stub_sec_prefix = link_sec->name;
4734 out_sec = link_sec->output_section;
/* NaCl wants 16-byte (2^4) bundle alignment; otherwise 8 bytes (2^3).  */
4735 align = htab->root.target_os == is_nacl ? 4 : 3;
/* Create the stub section on first use: name is "<prefix><STUB_SUFFIX>",
   allocated from the stub BFD and registered via the add_stub_section
   callback supplied by the linker.  */
4738 if (*stub_sec_p == NULL)
4744 namelen = strlen (stub_sec_prefix);
4745 len = namelen + sizeof (STUB_SUFFIX);
4746 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4750 memcpy (s_name, stub_sec_prefix, namelen);
4751 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4752 *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4754 if (*stub_sec_p == NULL)
4757 out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4758 | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
/* Remember the stub section on the branching section's own group slot so
   later lookups for this section hit directly.  */
4762 if (!dedicated_output_section)
4763 htab->stub_group[section->id].stub_sec = *stub_sec_p;
4766 *link_sec_p = link_sec;
4771 /* Add a new stub entry to the stub hash. Not all fields of the new
4772 stub entry are initialised. */
4774 static struct elf32_arm_stub_hash_entry *
4775 elf32_arm_add_stub (const char *stub_name, asection *section,
4776 struct elf32_arm_link_hash_table *htab,
4777 enum elf32_arm_stub_type stub_type)
4781 struct elf32_arm_stub_hash_entry *stub_entry;
4783 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4785 if (stub_sec == NULL)
4788 /* Enter this entry into the linker stub hash table. */
4789 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4791 if (stub_entry == NULL)
/* NOTE(review): SECTION may be NULL for dedicated-section stubs; the elided
   line(s) here presumably substitute the stub section before it is
   dereferenced for the error message — confirm against the full source.  */
4793 if (section == NULL)
4795 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4796 section->owner, stub_name);
/* stub_offset of -1 marks "no slot assigned yet"; arm_build_one_stub
   assigns the slot at the end of the section on first build.  */
4800 stub_entry->stub_sec = stub_sec;
4801 stub_entry->stub_offset = (bfd_vma) -1;
4802 stub_entry->id_sec = link_sec;
4807 /* Store an Arm insn into an output section not processed by
4808 elf32_arm_write_section. */
4811 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4812 bfd * output_bfd, bfd_vma val, void * ptr)
/* byteswap_code (from --be8-style linking) may make instruction endianness
   differ from data endianness, so pick the putter accordingly.  */
4814 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4815 bfd_putl32 (val, ptr);
4817 bfd_putb32 (val, ptr);
4820 /* Store a 16-bit Thumb insn into an output section not processed by
4821 elf32_arm_write_section. */
4824 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4825 bfd * output_bfd, bfd_vma val, void * ptr)
/* Same endianness selection as put_arm_insn, but for a 16-bit value.  */
4827 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4828 bfd_putl16 (val, ptr);
4830 bfd_putb16 (val, ptr);
4833 /* Store a Thumb2 insn into an output section not processed by
4834 elf32_arm_write_section. */
4837 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4838 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4840 /* T2 instructions are 16-bit streamed. */
/* A 32-bit Thumb-2 encoding is written as two consecutive halfwords,
   high halfword first, each in the selected byte order.  */
4841 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4843 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4844 bfd_putl16 ((val & 0xffff), ptr + 2);
4848 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4849 bfd_putb16 ((val & 0xffff), ptr + 2);
4853 /* If it's possible to change R_TYPE to a more efficient access
4854 model, return the new reloc type. */
4857 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4858 struct elf_link_hash_entry *h)
/* H == NULL means the symbol is local to this module.  */
4860 int is_local = (h == NULL);
/* No relaxation when building a shared object, or for undefined weak
   symbols whose resolution is not known until runtime.  */
4862 if (bfd_link_dll (info)
4863 || (h && h->root.type == bfd_link_hash_undefweak))
4866 /* We do not support relaxations for Old TLS models. */
/* TLS descriptor sequences relax to Local Exec for local symbols and to
   Initial Exec otherwise (executable link only, per the guard above).  */
4869 case R_ARM_TLS_GOTDESC:
4870 case R_ARM_TLS_CALL:
4871 case R_ARM_THM_TLS_CALL:
4872 case R_ARM_TLS_DESCSEQ:
4873 case R_ARM_THM_TLS_DESCSEQ:
4874 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4880 static bfd_reloc_status_type elf32_arm_final_link_relocate
4881 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4882 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4883 const char *, unsigned char, enum arm_st_branch_type,
4884 struct elf_link_hash_entry *, bfd_boolean *, char **);
/* Return the byte alignment required by instructions in stubs of type
   STUB_TYPE.  Callers (see arm_build_one_stub) compare the result against 2
   to distinguish Thumb-aligned Cortex-A8 veneers from 4-byte-aligned ARM
   stubs; exact per-group return values are elided in this extract.  */
4887 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
/* Cortex-A8 erratum veneers: Thumb branches, 2-byte alignment group.  */
4891 case arm_stub_a8_veneer_b_cond:
4892 case arm_stub_a8_veneer_b:
4893 case arm_stub_a8_veneer_bl:
/* Regular long/short branch stubs and the A8 BLX veneer.  */
4896 case arm_stub_long_branch_any_any:
4897 case arm_stub_long_branch_v4t_arm_thumb:
4898 case arm_stub_long_branch_thumb_only:
4899 case arm_stub_long_branch_thumb2_only:
4900 case arm_stub_long_branch_thumb2_only_pure:
4901 case arm_stub_long_branch_v4t_thumb_thumb:
4902 case arm_stub_long_branch_v4t_thumb_arm:
4903 case arm_stub_short_branch_v4t_thumb_arm:
4904 case arm_stub_long_branch_any_arm_pic:
4905 case arm_stub_long_branch_any_thumb_pic:
4906 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4907 case arm_stub_long_branch_v4t_arm_thumb_pic:
4908 case arm_stub_long_branch_v4t_thumb_arm_pic:
4909 case arm_stub_long_branch_thumb_only_pic:
4910 case arm_stub_long_branch_any_tls_pic:
4911 case arm_stub_long_branch_v4t_thumb_tls_pic:
4912 case arm_stub_cmse_branch_thumb_only:
4913 case arm_stub_a8_veneer_blx:
/* NaCl stubs need stricter (bundle) alignment.  */
4916 case arm_stub_long_branch_arm_nacl:
4917 case arm_stub_long_branch_arm_nacl_pic:
4921 abort (); /* Should be unreachable. */
4925 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4926 veneering (TRUE) or have their own symbol (FALSE). */
4929 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4931 if (stub_type >= max_stub_type)
4932 abort (); /* Should be unreachable. */
/* CMSE veneers replace the symbol they veneer; everything else gets a
   separate stub symbol.  */
4936 case arm_stub_cmse_branch_thumb_only:
4943 abort (); /* Should be unreachable. */
4946 /* Returns the padding needed for the dedicated section used stubs of type
4950 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4952 if (stub_type >= max_stub_type)
4953 abort (); /* Should be unreachable. */
/* Only the CMSE veneer section carries padding; other types need none.  */
4957 case arm_stub_cmse_branch_thumb_only:
4964 abort (); /* Should be unreachable. */
4967 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4968 returns the address of the hash table field in HTAB holding the offset at
4969 which new veneers should be layed out in the stub section. */
4972 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4973 enum elf32_arm_stub_type stub_type)
/* Only CMSE veneers track a "next free offset" in the hash table.  */
4977 case arm_stub_cmse_branch_thumb_only:
4978 return &htab->new_cmse_stub_offset;
4981 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
/* Build one stub: emit the instruction template into the stub section and
   apply its relocations.  Called via bfd_hash_traverse over the stub hash
   table; GEN_ENTRY is the stub entry, IN_ARG the bfd_link_info.  */
4987 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4991 bfd_boolean removed_sg_veneer;
4992 struct elf32_arm_stub_hash_entry *stub_entry;
4993 struct elf32_arm_link_hash_table *globals;
4994 struct bfd_link_info *info;
5001 const insn_sequence *template_sequence;
5003 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
5004 int stub_reloc_offset[MAXRELOCS] = {0, 0};
5006 int just_allocated = 0;
5008 /* Massage our args to the form they really have. */
5009 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5010 info = (struct bfd_link_info *) in_arg;
5012 /* Fail if the target section could not be assigned to an output
5013 section. The user should fix his linker script. */
5014 if (stub_entry->target_section->output_section == NULL
5015 && info->non_contiguous_regions)
5016 info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
5017 "Retry without --enable-non-contiguous-regions.\n"),
5018 stub_entry->target_section);
5020 globals = elf32_arm_hash_table (info);
5021 if (globals == NULL)
5024 stub_sec = stub_entry->stub_sec;
/* Two-pass ordering: Cortex-A8 fixes (alignment 2) are built in a separate
   traversal from the rest, selected by the sign of fix_cortex_a8.  */
5026 if ((globals->fix_cortex_a8 < 0)
5027 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5028 /* We have to do less-strictly-aligned fixes last. */
5031 /* Assign a slot at the end of section if none assigned yet. */
5032 if (stub_entry->stub_offset == (bfd_vma) -1)
5034 stub_entry->stub_offset = stub_sec->size;
5037 loc = stub_sec->contents + stub_entry->stub_offset;
5039 stub_bfd = stub_sec->owner;
5041 /* This is the address of the stub destination. */
5042 sym_value = (stub_entry->target_value
5043 + stub_entry->target_section->output_offset
5044 + stub_entry->target_section->output_section->vma);
5046 template_sequence = stub_entry->stub_template;
5047 template_size = stub_entry->stub_template_size;
/* Emit each template element, recording which elements need a relocation
   (index and byte offset) for the fix-up loop below.  */
5050 for (i = 0; i < template_size; i++)
5052 switch (template_sequence[i].type)
5056 bfd_vma data = (bfd_vma) template_sequence[i].data;
5057 if (template_sequence[i].reloc_addend != 0)
5059 /* We've borrowed the reloc_addend field to mean we should
5060 insert a condition code into this (Thumb-1 branch)
5061 instruction. See THUMB16_BCOND_INSN. */
5062 BFD_ASSERT ((data & 0xff00) == 0xd000);
5063 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5065 bfd_put_16 (stub_bfd, data, loc + size);
/* 32-bit Thumb-2 element: high halfword first (see put_thumb2_insn).  */
5071 bfd_put_16 (stub_bfd,
5072 (template_sequence[i].data >> 16) & 0xffff,
5074 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5076 if (template_sequence[i].r_type != R_ARM_NONE)
5078 stub_reloc_idx[nrelocs] = i;
5079 stub_reloc_offset[nrelocs++] = size;
/* 32-bit ARM element.  */
5085 bfd_put_32 (stub_bfd, template_sequence[i].data,
5087 /* Handle cases where the target is encoded within the
5089 if (template_sequence[i].r_type == R_ARM_JUMP24)
5091 stub_reloc_idx[nrelocs] = i;
5092 stub_reloc_offset[nrelocs++] = size;
/* Literal data word: always relocated.  */
5098 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5099 stub_reloc_idx[nrelocs] = i;
5100 stub_reloc_offset[nrelocs++] = size;
5111 stub_sec->size += size;
5113 /* Stub size has already been computed in arm_size_one_stub. Check
5115 BFD_ASSERT (size == stub_entry->stub_size);
5117 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5118 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5121 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5122 to relocate in each stub. */
/* A zero-sized CMSE stub is a Secure Gateway veneer that was removed;
   only in that case may nrelocs legitimately be zero.  */
5124 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5125 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
/* Apply the recorded relocations against the final symbol value.  */
5127 for (i = 0; i < nrelocs; i++)
5129 Elf_Internal_Rela rel;
5130 bfd_boolean unresolved_reloc;
5131 char *error_message;
5133 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5135 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5136 rel.r_info = ELF32_R_INFO (0,
5137 template_sequence[stub_reloc_idx[i]].r_type);
5140 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5141 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5142 template should refer back to the instruction after the original
5143 branch. We use target_section as Cortex-A8 erratum workaround stubs
5144 are only generated when both source and target are in the same
5146 points_to = stub_entry->target_section->output_section->vma
5147 + stub_entry->target_section->output_offset
5148 + stub_entry->source_value;
5150 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5151 (template_sequence[stub_reloc_idx[i]].r_type),
5152 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5153 points_to, info, stub_entry->target_section, "", STT_FUNC,
5154 stub_entry->branch_type,
5155 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5163 /* Calculate the template, template size and instruction size for a stub.
5164 Return value is the instruction size. */
5167 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5168 const insn_sequence **stub_template,
5169 int *stub_template_size)
5171 const insn_sequence *template_sequence = NULL;
5172 int template_size = 0, i;
/* Fetch the canonical template for this stub type from the table.  */
5175 template_sequence = stub_definitions[stub_type].template_sequence;
5177 *stub_template = template_sequence;
5179 template_size = stub_definitions[stub_type].template_size;
5180 if (stub_template_size)
5181 *stub_template_size = template_size;
/* Sum the byte size contributed by each template element (size-per-type
   cases elided in this extract).  */
5184 for (i = 0; i < template_size; i++)
5186 switch (template_sequence[i].type)
5207 /* As above, but don't actually build the stub. Just bump offset so
5208 we know stub section sizes. */
5211 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5212 void *in_arg ATTRIBUTE_UNUSED)
5214 struct elf32_arm_stub_hash_entry *stub_entry;
5215 const insn_sequence *template_sequence;
5216 int template_size, size;
5218 /* Massage our args to the form they really have. */
5219 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5221 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5222 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5224 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5227 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5228 if (stub_entry->stub_template_size)
5230 stub_entry->stub_size = size;
5231 stub_entry->stub_template = template_sequence;
5232 stub_entry->stub_template_size = template_size;
5235 /* Already accounted for. */
5236 if (stub_entry->stub_offset != (bfd_vma) -1)
/* Round each stub up to an 8-byte boundary before adding to the section
   size, matching the per-stub alignment used when building.  */
5239 size = (size + 7) & ~7;
5240 stub_entry->stub_sec->size += size;
5245 /* External entry points for sizing and building linker stubs. */
5247 /* Set up various things so that we can make a list of input sections
5248 for each output section included in the link. Returns -1 on error,
5249 0 when no stubs will be needed, and 1 on success. */
5252 elf32_arm_setup_section_lists (bfd *output_bfd,
5253 struct bfd_link_info *info)
5256 unsigned int bfd_count;
5257 unsigned int top_id, top_index;
5259 asection **input_list, **list;
5261 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5265 if (! is_elf_hash_table (htab))
5268 /* Count the number of input BFDs and find the top input section id. */
5269 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5271 input_bfd = input_bfd->link.next)
5274 for (section = input_bfd->sections;
5276 section = section->next)
5278 if (top_id < section->id)
5279 top_id = section->id;
5282 htab->bfd_count = bfd_count;
/* stub_group is indexed by input-section id, hence top_id + 1 entries,
   zero-initialized so unused slots have NULL link_sec/stub_sec.  */
5284 amt = sizeof (struct map_stub) * (top_id + 1);
5285 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5286 if (htab->stub_group == NULL)
5288 htab->top_id = top_id;
5290 /* We can't use output_bfd->section_count here to find the top output
5291 section index as some sections may have been removed, and
5292 _bfd_strip_section_from_output doesn't renumber the indices. */
5293 for (section = output_bfd->sections, top_index = 0;
5295 section = section->next)
5297 if (top_index < section->index)
5298 top_index = section->index;
5301 htab->top_index = top_index;
5302 amt = sizeof (asection *) * (top_index + 1);
5303 input_list = (asection **) bfd_malloc (amt);
5304 htab->input_list = input_list;
5305 if (input_list == NULL)
5308 /* For sections we aren't interested in, mark their entries with a
5309 value we can check later. */
/* Fill the whole list with the sentinel bfd_abs_section_ptr ...  */
5310 list = input_list + top_index;
5312 *list = bfd_abs_section_ptr;
5313 while (list-- != input_list);
/* ... then clear entries for code output sections, which are the only
   ones elf32_arm_next_input_section will accumulate into.  */
5315 for (section = output_bfd->sections;
5317 section = section->next)
5319 if ((section->flags & SEC_CODE) != 0)
5320 input_list[section->index] = NULL;
5326 /* The linker repeatedly calls this function for each input section,
5327 in the order that input sections are linked into output sections.
5328 Build lists of input sections to determine groupings between which
5329 we may insert linker stubs. */
5332 elf32_arm_next_input_section (struct bfd_link_info *info,
5335 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5340 if (isec->output_section->index <= htab->top_index)
5342 asection **list = htab->input_list + isec->output_section->index;
/* Only accumulate code sections into output sections that were marked
   interesting (non-sentinel) by elf32_arm_setup_section_lists.  */
5344 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5346 /* Steal the link_sec pointer for our list. */
5347 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5348 /* This happens to make the list in reverse order,
5349 which we reverse later. */
5350 PREV_SEC (isec) = *list;
5356 /* See whether we can group stub sections together. Grouping stub
5357 sections may result in fewer stubs. More importantly, we need to
5358 put all .init* and .fini* stubs at the end of the .init or
5359 .fini output sections respectively, because glibc splits the
5360 _init and _fini functions into multiple parts. Putting a stub in
5361 the middle of a function is not a good idea. */
5364 group_sections (struct elf32_arm_link_hash_table *htab,
5365 bfd_size_type stub_group_size,
5366 bfd_boolean stubs_always_after_branch)
/* Walk the per-output-section lists built by elf32_arm_next_input_section.  */
5368 asection **list = htab->input_list;
5372 asection *tail = *list;
5375 if (tail == bfd_abs_section_ptr)
5378 /* Reverse the list: we must avoid placing stubs at the
5379 beginning of the section because the beginning of the text
5380 section may be required for an interrupt vector in bare metal
5382 #define NEXT_SEC PREV_SEC
5384 while (tail != NULL)
5386 /* Pop from tail. */
5387 asection *item = tail;
5388 tail = PREV_SEC (item);
/* Push onto head: the PREV_SEC links now read forward as NEXT_SEC.  */
5391 NEXT_SEC (item) = head;
/* Greedily carve the (now forward-ordered) list into groups no wider
   than stub_group_size, each served by one stub section after CURR.  */
5395 while (head != NULL)
5399 bfd_vma stub_group_start = head->output_offset;
5400 bfd_vma end_of_next;
5403 while (NEXT_SEC (curr) != NULL)
5405 next = NEXT_SEC (curr);
5406 end_of_next = next->output_offset + next->size;
5407 if (end_of_next - stub_group_start >= stub_group_size)
5408 /* End of NEXT is too far from start, so stop. */
5410 /* Add NEXT to the group. */
5414 /* OK, the size from the start to the start of CURR is less
5415 than stub_group_size and thus can be handled by one stub
5416 section. (Or the head section is itself larger than
5417 stub_group_size, in which case we may be toast.)
5418 We should really be keeping track of the total size of
5419 stubs added here, as stubs contribute to the final output
5423 next = NEXT_SEC (head);
5424 /* Set up this stub group. */
5425 htab->stub_group[head->id].link_sec = curr;
5427 while (head != curr && (head = next) != NULL);
5429 /* But wait, there's more! Input sections up to stub_group_size
5430 bytes after the stub section can be handled by it too. */
5431 if (!stubs_always_after_branch)
5433 stub_group_start = curr->output_offset + curr->size;
5435 while (next != NULL)
5437 end_of_next = next->output_offset + next->size;
5438 if (end_of_next - stub_group_start >= stub_group_size)
5439 /* End of NEXT is too far from stubs, so stop. */
5441 /* Add NEXT to the stub group. */
5443 next = NEXT_SEC (head);
5444 htab->stub_group[head->id].link_sec = curr;
5450 while (list++ != htab->input_list + htab->top_index);
/* The lists are no longer needed once grouping is decided.  */
5452 free (htab->input_list);
5457 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5461 a8_reloc_compare (const void *a, const void *b)
5463 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5464 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
/* Order by branch source address ('from'), qsort/bsearch convention.  */
5466 if (ra->from < rb->from)
5468 else if (ra->from > rb->from)
5474 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5475 const char *, char **);
5477 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5478 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5479 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5483 cortex_a8_erratum_scan (bfd *input_bfd,
5484 struct bfd_link_info *info,
5485 struct a8_erratum_fix **a8_fixes_p,
5486 unsigned int *num_a8_fixes_p,
5487 unsigned int *a8_fix_table_size_p,
5488 struct a8_erratum_reloc *a8_relocs,
5489 unsigned int num_a8_relocs,
5490 unsigned prev_num_a8_fixes,
5491 bfd_boolean *stub_changed_p)
5494 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
/* Work on local copies of the fix table state; written back at the end.  */
5495 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5496 unsigned int num_a8_fixes = *num_a8_fixes_p;
5497 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5502 for (section = input_bfd->sections;
5504 section = section->next)
5506 bfd_byte *contents = NULL;
5507 struct _arm_elf_section_data *sec_data;
/* Only scan allocated executable PROGBITS sections that made it into
   the output.  */
5511 if (elf_section_type (section) != SHT_PROGBITS
5512 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5513 || (section->flags & SEC_EXCLUDE) != 0
5514 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5515 || (section->output_section == bfd_abs_section_ptr))
5518 base_vma = section->output_section->vma + section->output_offset;
5520 if (elf_section_data (section)->this_hdr.contents != NULL)
5521 contents = elf_section_data (section)->this_hdr.contents;
5522 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5525 sec_data = elf32_arm_section_data (section);
/* Walk the mapping symbols: each span is a run of one state (ARM/Thumb/
   data); only Thumb ('t') spans can contain the erratum sequence.  */
5527 for (span = 0; span < sec_data->mapcount; span++)
5529 unsigned int span_start = sec_data->map[span].vma;
5530 unsigned int span_end = (span == sec_data->mapcount - 1)
5531 ? section->size : sec_data->map[span + 1].vma;
5533 char span_type = sec_data->map[span].type;
5534 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5536 if (span_type != 't')
5539 /* Span is entirely within a single 4KB region: skip scanning. */
5540 if (((base_vma + span_start) & ~0xfff)
5541 == ((base_vma + span_end) & ~0xfff))
5544 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5546 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5547 * The branch target is in the same 4KB region as the
5548 first half of the branch.
5549 * The instruction before the branch is a 32-bit
5550 length non-branch instruction. */
5551 for (i = span_start; i < span_end;)
5553 unsigned int insn = bfd_getl16 (&contents[i]);
5554 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5555 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
/* First halfword 0b111xx with xx != 00 marks a 32-bit Thumb-2 insn.  */
5557 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5562 /* Load the rest of the insn (in manual-friendly order). */
5563 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5565 /* Encoding T4: B<c>.W. */
5566 is_b = (insn & 0xf800d000) == 0xf0009000;
5567 /* Encoding T1: BL<c>.W. */
5568 is_bl = (insn & 0xf800d000) == 0xf000d000;
5569 /* Encoding T2: BLX<c>.W. */
5570 is_blx = (insn & 0xf800d000) == 0xf000c000;
5571 /* Encoding T3: B<c>.W (not permitted in IT block). */
5572 is_bcc = (insn & 0xf800d000) == 0xf0008000
5573 && (insn & 0x07f00000) != 0x03800000;
5576 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
/* The erratum pattern: branch whose first halfword sits at the last
   two bytes of a 4KB page, preceded by a 32-bit non-branch insn.  */
5578 if (((base_vma + i) & 0xfff) == 0xffe
5582 && ! last_was_branch)
5584 bfd_signed_vma offset = 0;
5585 bfd_boolean force_target_arm = FALSE;
5586 bfd_boolean force_target_thumb = FALSE;
5588 enum elf32_arm_stub_type stub_type = arm_stub_none;
5589 struct a8_erratum_reloc key, *found;
5590 bfd_boolean use_plt = FALSE;
/* Look up a relocation recorded at this branch address; A8_RELOCS
   is sorted by 'from' so bsearch applies.  */
5592 key.from = base_vma + i;
5593 found = (struct a8_erratum_reloc *)
5594 bsearch (&key, a8_relocs, num_a8_relocs,
5595 sizeof (struct a8_erratum_reloc),
5600 char *error_message = NULL;
5601 struct elf_link_hash_entry *entry;
5603 /* We don't care about the error returned from this
5604 function, only if there is glue or not. */
5605 entry = find_thumb_glue (info, found->sym_name,
5609 found->non_a8_stub = TRUE;
5611 /* Keep a simpler condition, for the sake of clarity. */
5612 if (htab->root.splt != NULL && found->hash != NULL
5613 && found->hash->root.plt.offset != (bfd_vma) -1)
/* A Thumb BL to an ARM destination (or BLX to Thumb) will need a
   mode change; remember which way to force the veneer.  */
5616 if (found->r_type == R_ARM_THM_CALL)
5618 if (found->branch_type == ST_BRANCH_TO_ARM
5620 force_target_arm = TRUE;
5622 force_target_thumb = TRUE;
5626 /* Check if we have an offending branch instruction. */
5628 if (found && found->non_a8_stub)
5629 /* We've already made a stub for this instruction, e.g.
5630 it's a long branch or a Thumb->ARM stub. Assume that
5631 stub will suffice to work around the A8 erratum (see
5632 setting of always_after_branch above). */
/* Decode the Bcc.W (T3) immediate: sign-extend from 21 bits.  */
5636 offset = (insn & 0x7ff) << 1;
5637 offset |= (insn & 0x3f0000) >> 4;
5638 offset |= (insn & 0x2000) ? 0x40000 : 0;
5639 offset |= (insn & 0x800) ? 0x80000 : 0;
5640 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5641 if (offset & 0x100000)
5642 offset |= ~ ((bfd_signed_vma) 0xfffff);
5643 stub_type = arm_stub_a8_veneer_b_cond;
5645 else if (is_b || is_bl || is_blx)
/* Decode the B.W/BL/BLX (T4/T1/T2) immediate: S, J1, J2 combine
   into the top bits; sign-extend from 25 bits.  */
5647 int s = (insn & 0x4000000) != 0;
5648 int j1 = (insn & 0x2000) != 0;
5649 int j2 = (insn & 0x800) != 0;
5653 offset = (insn & 0x7ff) << 1;
5654 offset |= (insn & 0x3ff0000) >> 4;
5658 if (offset & 0x1000000)
5659 offset |= ~ ((bfd_signed_vma) 0xffffff);
/* BLX targets are word-aligned.  */
5662 offset &= ~ ((bfd_signed_vma) 3);
5664 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5665 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5668 if (stub_type != arm_stub_none)
5670 bfd_vma pc_for_insn = base_vma + i + 4;
5672 /* The original instruction is a BL, but the target is
5673 an ARM instruction. If we were not making a stub,
5674 the BL would have been converted to a BLX. Use the
5675 BLX stub instead in that case. */
5676 if (htab->use_blx && force_target_arm
5677 && stub_type == arm_stub_a8_veneer_bl)
5679 stub_type = arm_stub_a8_veneer_blx;
5683 /* Conversely, if the original instruction was
5684 BLX but the target is Thumb mode, use the BL
5686 else if (force_target_thumb
5687 && stub_type == arm_stub_a8_veneer_blx)
5689 stub_type = arm_stub_a8_veneer_bl;
/* For BLX the PC base is word-aligned.  */
5695 pc_for_insn &= ~ ((bfd_vma) 3);
5697 /* If we found a relocation, use the proper destination,
5698 not the offset in the (unrelocated) instruction.
5699 Note this is always done if we switched the stub type
5703 (bfd_signed_vma) (found->destination - pc_for_insn);
5705 /* If the stub will use a Thumb-mode branch to a
5706 PLT target, redirect it to the preceding Thumb
5708 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5709 offset -= PLT_THUMB_STUB_SIZE;
5711 target = pc_for_insn + offset;
5713 /* The BLX stub is ARM-mode code. Adjust the offset to
5714 take the different PC value (+8 instead of +4) into
5716 if (stub_type == arm_stub_a8_veneer_blx)
/* Only a fix if the target lies in the same 4KB page as the
   branch — that is the erratum condition.  */
5719 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5721 char *stub_name = NULL;
/* Grow the fix table geometrically when full.  */
5723 if (num_a8_fixes == a8_fix_table_size)
5725 a8_fix_table_size *= 2;
5726 a8_fixes = (struct a8_erratum_fix *)
5727 bfd_realloc (a8_fixes,
5728 sizeof (struct a8_erratum_fix)
5729 * a8_fix_table_size);
5732 if (num_a8_fixes < prev_num_a8_fixes)
5734 /* If we're doing a subsequent scan,
5735 check if we've found the same fix as
5736 before, and try and reuse the stub
5738 stub_name = a8_fixes[num_a8_fixes].stub_name;
5739 if ((a8_fixes[num_a8_fixes].section != section)
5740 || (a8_fixes[num_a8_fixes].offset != i))
5744 *stub_changed_p = TRUE;
/* New fix: name the stub "<section-id>:<offset>" in hex.  */
5750 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5751 if (stub_name != NULL)
5752 sprintf (stub_name, "%x:%x", section->id, i);
5755 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5756 a8_fixes[num_a8_fixes].section = section;
5757 a8_fixes[num_a8_fixes].offset = i;
5758 a8_fixes[num_a8_fixes].target_offset =
5760 a8_fixes[num_a8_fixes].orig_insn = insn;
5761 a8_fixes[num_a8_fixes].stub_name = stub_name;
5762 a8_fixes[num_a8_fixes].stub_type = stub_type;
5763 a8_fixes[num_a8_fixes].branch_type =
5764 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
/* Advance past this insn and remember its class for the
   "preceded by a 32-bit non-branch insn" test above.  */
5771 i += insn_32bit ? 4 : 2;
5772 last_was_32bit = insn_32bit;
5773 last_was_branch = is_32bit_branch;
/* Free contents only if we malloc'd them ourselves above.  */
5777 if (elf_section_data (section)->this_hdr.contents == NULL)
/* Write the (possibly reallocated/updated) table state back out.  */
5781 *a8_fixes_p = a8_fixes;
5782 *num_a8_fixes_p = num_a8_fixes;
5783 *a8_fix_table_size_p = a8_fix_table_size;
5788 /* Create or update a stub entry depending on whether the stub can already be
5789 found in HTAB. The stub is identified by:
5790 - its type STUB_TYPE
5791 - its source branch (note that several can share the same stub) whose
5792 section and relocation (if any) are given by SECTION and IRELA
5794 - its target symbol whose input section, hash, name, value and branch type
5795 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5798 If found, the value of the stub's target symbol is updated from SYM_VALUE
5799 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5800 TRUE and the stub entry is initialized.
5802 Returns the stub that was created or updated, or NULL if an error occurred. */
5805 static struct elf32_arm_stub_hash_entry *
5806 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5807 enum elf32_arm_stub_type stub_type, asection *section,
5808 Elf_Internal_Rela *irela, asection *sym_sec,
5809 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5810 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5811 bfd_boolean *new_stub)
5813 const asection *id_sec;
5815 struct elf32_arm_stub_hash_entry *stub_entry;
5816 unsigned int r_type;
/* Whether this stub type "claims" its target symbol.  Presumably this is
   the case for CMSE secure gateway veneers (see cmse_scan) — for such
   stubs the caller-supplied SYM_NAME is used directly as the hash-table
   key instead of a name derived from the stub group and relocation.  */
5817 bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5819 BFD_ASSERT (stub_type != arm_stub_none);
/* Symbol-claiming stubs: the stub name is the target symbol's name.  */
5823 stub_name = sym_name;
5827 BFD_ASSERT (section);
5828 BFD_ASSERT (section->id <= htab->top_id);
5830 /* Support for grouping stub sections. */
5831 id_sec = htab->stub_group[section->id].link_sec;
5833 /* Get the name of this stub. */
5834 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
/* Look the stub up; several source branches may share one stub entry.  */
5840 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5842 /* The proper stub has already been created, just update its value. */
5843 if (stub_entry != NULL)
5847 stub_entry->target_value = sym_value;
/* No existing stub: allocate and initialize a new hash-table entry.  */
5851 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5852 if (stub_entry == NULL)
5859 stub_entry->target_value = sym_value;
5860 stub_entry->target_section = sym_sec;
5861 stub_entry->stub_type = stub_type;
5862 stub_entry->h = hash;
5863 stub_entry->branch_type = branch_type;
/* For symbol-claiming stubs the veneer is output under the symbol's own
   name; otherwise build the output name from a template and SYM_NAME.  */
5866 stub_entry->output_name = sym_name;
5869 if (sym_name == NULL)
5870 sym_name = "unnamed";
/* NOTE(review): buffer sized from THUMB2ARM_GLUE_ENTRY_NAME — assumes the
   other two name templates used below are no longer; confirm.  */
5871 stub_entry->output_name = (char *)
5872 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5873 + strlen (sym_name));
5874 if (stub_entry->output_name == NULL)
5880 /* For historical reasons, use the existing names for ARM-to-Thumb and
5881 Thumb-to-ARM stubs. */
5882 r_type = ELF32_R_TYPE (irela->r_info);
5883 if ((r_type == (unsigned int) R_ARM_THM_CALL
5884 || r_type == (unsigned int) R_ARM_THM_JUMP24
5885 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5886 && branch_type == ST_BRANCH_TO_ARM)
5887 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5888 else if ((r_type == (unsigned int) R_ARM_CALL
5889 || r_type == (unsigned int) R_ARM_JUMP24)
5890 && branch_type == ST_BRANCH_TO_THUMB)
5891 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5893 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5900 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5901 gateway veneer to transition from non secure to secure state and create them
5904 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5905 defines the conditions that govern Secure Gateway veneer creation for a
5906 given symbol <SYM> as follows:
5907 - it has function type
5908 - it has non local binding
5909 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5910 same type, binding and value as <SYM> (called normal symbol).
5911 An entry function can handle secure state transition itself in which case
5912 its special symbol would have a different value from the normal symbol.
5914 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5915 entry mapping while HTAB gives the name to hash entry mapping.
5916 *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5919 The return value gives whether a stub failed to be allocated. */
5922 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5923 obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5924 int *cmse_stub_created)
5926 const struct elf_backend_data *bed;
5927 Elf_Internal_Shdr *symtab_hdr;
5928 unsigned i, j, sym_count, ext_start;
5929 Elf_Internal_Sym *cmse_sym, *local_syms;
5930 struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5931 enum arm_st_branch_type branch_type;
5932 char *sym_name, *lsym_name;
5935 struct elf32_arm_stub_hash_entry *stub_entry;
5936 bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;
5938 bed = get_elf_backend_data (input_bfd);
5939 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5940 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
/* sh_info is the index of the first global symbol: entries below
   EXT_START are local, entries at or above it are in SYM_HASHES.  */
5941 ext_start = symtab_hdr->sh_info;
5942 is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5943 && out_attr[Tag_CPU_arch_profile].i == 'M');
/* Read in the local symbols if they are not cached already.  */
5945 local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5946 if (local_syms == NULL)
5947 local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5948 symtab_hdr->sh_info, 0, NULL, NULL,
5950 if (symtab_hdr->sh_info && local_syms == NULL)
/* Walk every symbol (locals first, then globals) looking for special
   symbols carrying the CMSE_PREFIX.  */
5954 for (i = 0; i < sym_count; i++)
5956 cmse_invalid = FALSE;
5960 cmse_sym = &local_syms[i];
5961 sym_name = bfd_elf_string_from_elf_section (input_bfd,
5962 symtab_hdr->sh_link,
5964 if (!sym_name || !CONST_STRNEQ (sym_name, CMSE_PREFIX))
5967 /* Special symbol with local binding. */
5968 cmse_invalid = TRUE;
5972 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
5973 sym_name = (char *) cmse_hash->root.root.root.string;
5974 if (!CONST_STRNEQ (sym_name, CMSE_PREFIX))
5977 /* Special symbol has incorrect binding or type. */
5978 if ((cmse_hash->root.root.type != bfd_link_hash_defined
5979 && cmse_hash->root.root.type != bfd_link_hash_defweak)
5980 || cmse_hash->root.type != STT_FUNC)
5981 cmse_invalid = TRUE;
5986 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
5987 "ARMv8-M architecture or later"),
5988 input_bfd, sym_name);
5989 is_v8m = TRUE; /* Avoid multiple warning. */
5995 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
5996 " a global or weak function symbol"),
5997 input_bfd, sym_name);
/* Strip the special-symbol prefix to get the normal symbol's name and
   look up its (global) hash entry.  */
6003 sym_name += strlen (CMSE_PREFIX);
6004 hash = (struct elf32_arm_link_hash_entry *)
6005 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6007 /* No associated normal symbol or it is neither global nor weak. */
6009 || (hash->root.root.type != bfd_link_hash_defined
6010 && hash->root.root.type != bfd_link_hash_defweak)
6011 || hash->root.type != STT_FUNC)
6013 /* Initialize here to avoid warning about use of possibly
6014 uninitialized variable. */
6019 /* Searching for a normal symbol with local binding. */
6020 for (; j < ext_start; j++)
6023 bfd_elf_string_from_elf_section (input_bfd,
6024 symtab_hdr->sh_link,
6025 local_syms[j].st_name);
6026 if (!strcmp (sym_name, lsym_name))
/* A normal symbol exists but is local (j < ext_start) or is otherwise
   unsuitable: diagnose which case applies.  */
6031 if (hash || j < ext_start)
6034 (_("%pB: invalid standard symbol `%s'; it must be "
6035 "a global or weak function symbol"),
6036 input_bfd, sym_name);
6040 (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6046 sym_value = hash->root.root.u.def.value;
6047 section = hash->root.root.u.def.section;
/* The normal and special symbols must live in the same section.  */
6049 if (cmse_hash->root.root.u.def.section != section)
6052 (_("%pB: `%s' and its special symbol are in different sections"),
6053 input_bfd, sym_name);
6056 if (cmse_hash->root.root.u.def.value != sym_value)
6057 continue; /* Ignore: could be an entry function starting with SG. */
6059 /* If this section is a link-once section that will be discarded, then
6060 don't create any stubs. */
6061 if (section->output_section == NULL)
6064 (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6068 if (hash->root.size == 0)
6071 (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
/* All checks passed: create (or update) the secure gateway veneer stub
   for this entry function and count it.  */
6077 branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6079 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6080 NULL, NULL, section, hash, sym_name,
6081 sym_value, branch_type, &new_stub);
6083 if (stub_entry == NULL)
6087 BFD_ASSERT (new_stub);
6088 (*cmse_stub_created)++;
/* Free the local symbols unless they were cached in the header.  */
6092 if (!symtab_hdr->contents)
6097 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6098 code entry function, ie can be called from non secure code without using a
veneer. */
6102 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6104 bfd_byte contents[4];
6105 uint32_t first_insn;
6110 /* Defined symbol of function type. */
6111 if (hash->root.root.type != bfd_link_hash_defined
6112 && hash->root.root.type != bfd_link_hash_defweak)
6114 if (hash->root.type != STT_FUNC)
6117 /* Read first instruction. */
6118 section = hash->root.root.u.def.section;
6119 abfd = section->owner;
/* Convert the symbol's address to an offset within its section.  */
6120 offset = hash->root.root.u.def.value - section->vma;
6121 if (!bfd_get_section_contents (abfd, section, contents, offset,
6125 first_insn = bfd_get_32 (abfd, contents);
6127 /* An entry function starts with an SG instruction (encoding 0xe97fe97f). */
6128 return first_insn == 0xe97fe97f;
6131 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
6132 secure gateway veneer (ie. the veneer was not in the input import library)
6133 and there is no output import library (GEN_INFO->out_implib_bfd is NULL). */
6136 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6138 struct elf32_arm_stub_hash_entry *stub_entry;
6139 struct bfd_link_info *info;
6141 /* Massage our args to the form they really have. */
6142 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6143 info = (struct bfd_link_info *) gen_info;
/* Nothing to list when an output import library will record the veneers.  */
6145 if (info->out_implib_bfd)
/* Only secure gateway veneers are of interest here.  */
6148 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
/* An offset of (bfd_vma) -1 marks a veneer that did not get an address
   from the input import library, i.e. a new SG veneer: print its name.  */
6151 if (stub_entry->stub_offset == (bfd_vma) -1)
6152 _bfd_error_handler (" %s", stub_entry->output_name);
6157 /* Set the offset of each secure gateway veneer so that its address remains
6158 identical to the one in the input import library referred to by
6159 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6160 (present in input import library but absent from the executable being
6161 linked) or if new veneers appeared and there is no output import library
6162 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6163 number of secure gateway veneers found in the input import library).
6165 The function returns whether an error occurred. If no error occurred,
6166 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6167 and this function and HTAB->new_cmse_stub_offset is set to just past the
6168 highest veneer offset observed, for new veneers to be laid out after. */
6171 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6172 struct elf32_arm_link_hash_table *htab,
6173 int *cmse_stub_created)
6180 asection *stub_out_sec;
6181 bfd_boolean ret = TRUE;
6182 Elf_Internal_Sym *intsym;
6183 const char *out_sec_name;
6184 bfd_size_type cmse_stub_size;
6185 asymbol **sympp = NULL, *sym;
6186 struct elf32_arm_link_hash_entry *hash;
6187 const insn_sequence *cmse_stub_template;
6188 struct elf32_arm_stub_hash_entry *stub_entry;
6189 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6190 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6191 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6193 /* No input secure gateway import library. */
6194 if (!htab->in_implib_bfd)
6197 in_implib_bfd = htab->in_implib_bfd;
/* --in-implib is only meaningful together with --cmse-implib.  */
6198 if (!htab->cmse_implib)
6200 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6201 "Gateway import libraries"), in_implib_bfd);
6205 /* Get symbol table size. */
6206 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6210 /* Read in the input secure gateway import library's symbol table. */
6211 sympp = (asymbol **) bfd_malloc (symsize);
6215 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6222 htab->new_cmse_stub_offset = 0;
/* All SG veneers share a single template; query it once up front.  */
6224 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6225 &cmse_stub_template,
6226 &cmse_stub_template_size);
/* Find the VMA of the dedicated SG veneer output section, if any.  */
6228 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6230 bfd_get_section_by_name (htab->obfd, out_sec_name);
6231 if (stub_out_sec != NULL)
6232 cmse_stub_sec_vma = stub_out_sec->vma;
6234 /* Set addresses of veneers mentioned in input secure gateway import
6235 library's symbol table. */
6236 for (i = 0; i < symcount; i++)
6240 sym_name = (char *) bfd_asymbol_name (sym);
6241 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
/* Import-library entries must be absolute global/weak Thumb functions.  */
6243 if (sym->section != bfd_abs_section_ptr
6244 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6245 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6246 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6247 != ST_BRANCH_TO_THUMB))
6249 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6250 "symbol should be absolute, global and "
6251 "refer to Thumb functions"),
6252 in_implib_bfd, sym_name);
/* Translate the veneer's absolute address into an offset from the
   SG veneer section start.  */
6257 veneer_value = bfd_asymbol_value (sym);
6258 stub_offset = veneer_value - cmse_stub_sec_vma;
6259 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6261 hash = (struct elf32_arm_link_hash_entry *)
6262 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6264 /* Stub entry should have been created by cmse_scan or the symbol be of
6265 a secure function callable from non secure code. */
6266 if (!stub_entry && !hash)
6268 bfd_boolean new_stub;
/* The veneer vanished from the secure code: warn, then create an
   empty stub at its old address so the address stays reserved.  */
6271 (_("entry function `%s' disappeared from secure code"), sym_name);
6272 hash = (struct elf32_arm_link_hash_entry *)
6273 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6275 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6276 NULL, NULL, bfd_abs_section_ptr, hash,
6277 sym_name, veneer_value,
6278 ST_BRANCH_TO_THUMB, &new_stub);
6279 if (stub_entry == NULL)
6283 BFD_ASSERT (new_stub);
6284 new_cmse_stubs_created++;
6285 (*cmse_stub_created)++;
6287 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6288 stub_entry->stub_offset = stub_offset;
6290 /* Symbol found is not callable from non secure code. */
6291 else if (!stub_entry)
6293 if (!cmse_entry_fct_p (hash))
6295 _bfd_error_handler (_("`%s' refers to a non entry function"),
6303 /* Only stubs for SG veneers should have been created. */
6304 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6306 /* Check visibility hasn't changed. */
6307 if (!!(flags & BSF_GLOBAL)
6308 != (hash->root.root.type == bfd_link_hash_defined))
6310 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
/* Pin the existing stub to its address from the import library.  */
6313 stub_entry->stub_offset = stub_offset;
6316 /* Size should match that of a SG veneer. */
6317 if (intsym->st_size != cmse_stub_size)
6319 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6320 in_implib_bfd, sym_name);
6324 /* Previous veneer address is before current SG veneer section. */
6325 if (veneer_value < cmse_stub_sec_vma)
6327 /* Avoid offset underflow. */
6329 stub_entry->stub_offset = 0;
6334 /* Complain if stub offset not a multiple of stub size. */
6335 if (stub_offset % cmse_stub_size)
6338 (_("offset of veneer for entry function `%s' not a multiple of "
6339 "its size"), sym_name);
/* This veneer came from the import library, so it is not "new".
   Track the lowest veneer address and the offset just past the
   highest one (veneer size rounded up to an 8-byte multiple).  */
6346 new_cmse_stubs_created--;
6347 if (veneer_value < cmse_stub_array_start)
6348 cmse_stub_array_start = veneer_value;
6349 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6350 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6351 htab->new_cmse_stub_offset = next_cmse_stub_offset;
/* New veneers with nowhere to record them is an error: list them.  */
6354 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6356 BFD_ASSERT (new_cmse_stubs_created > 0);
6358 (_("new entry function(s) introduced but no output import library "
6360 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
/* The veneer array must still start where the section starts.  */
6363 if (cmse_stub_array_start != cmse_stub_sec_vma)
6366 (_("start address of `%s' is different from previous link"),
6376 /* Determine and set the size of the stub section for a final link.
6378 The basic idea here is to examine all the relocations looking for
6379 PC-relative calls to a target that is unreachable with a "bl" instruction. */
6383 elf32_arm_size_stubs (bfd *output_bfd,
6385 struct bfd_link_info *info,
6386 bfd_signed_vma group_size,
6387 asection * (*add_stub_section) (const char *, asection *,
6390 void (*layout_sections_again) (void))
6392 bfd_boolean ret = TRUE;
6393 obj_attribute *out_attr;
6394 int cmse_stub_created = 0;
6395 bfd_size_type stub_group_size;
6396 bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
6397 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6398 struct a8_erratum_fix *a8_fixes = NULL;
6399 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6400 struct a8_erratum_reloc *a8_relocs = NULL;
6401 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
/* Pre-allocate the Cortex-A8 erratum candidate tables; both are grown
   with bfd_realloc below when they fill up.  */
6406 if (htab->fix_cortex_a8)
6408 a8_fixes = (struct a8_erratum_fix *)
6409 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6410 a8_relocs = (struct a8_erratum_reloc *)
6411 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6414 /* Propagate mach to stub bfd, because it may not have been
6415 finalized when we created stub_bfd. */
6416 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6417 bfd_get_mach (output_bfd));
6419 /* Stash our params away. */
6420 htab->stub_bfd = stub_bfd;
6421 htab->add_stub_section = add_stub_section;
6422 htab->layout_sections_again = layout_sections_again;
6423 stubs_always_after_branch = group_size < 0;
6425 out_attr = elf_known_obj_attributes_proc (output_bfd);
6426 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6428 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6429 as the first half of a 32-bit branch straddling two 4K pages. This is a
6430 crude way of enforcing that. */
6431 if (htab->fix_cortex_a8)
6432 stubs_always_after_branch = 1;
6435 stub_group_size = -group_size;
6437 stub_group_size = group_size;
6439 if (stub_group_size == 1)
6441 /* Default values. */
6442 /* Thumb branch range is +-4MB has to be used as the default
6443 maximum size (a given section can contain both ARM and Thumb
6444 code, so the worst case has to be taken into account).
6446 This value is 24K less than that, which allows for 2025
6447 12-byte stubs. If we exceed that, then we will fail to link.
6448 The user will have to relink with an explicit group size
6450 stub_group_size = 4170000;
6453 group_sections (htab, stub_group_size, stubs_always_after_branch);
6455 /* If we're applying the cortex A8 fix, we need to determine the
6456 program header size now, because we cannot change it later --
6457 that could alter section placements. Notice the A8 erratum fix
6458 ends up requiring the section addresses to remain unchanged
6459 modulo the page size. That's something we cannot represent
6460 inside BFD, and we don't want to force the section alignment to
6461 be the page size. */
6462 if (htab->fix_cortex_a8)
6463 (*htab->layout_sections_again) ();
/* NOTE(review): the header of the enclosing retry loop is elided here;
   sizing appears to be repeated while STUB_CHANGED stays set, since
   adding stubs moves sections and can put new branches out of range —
   confirm against the full source.  */
6468 unsigned int bfd_indx;
6470 enum elf32_arm_stub_type stub_type;
6471 bfd_boolean stub_changed = FALSE;
6472 unsigned prev_num_a8_fixes = num_a8_fixes;
/* Scan every input BFD for branches that need stubs.  */
6475 for (input_bfd = info->input_bfds, bfd_indx = 0;
6477 input_bfd = input_bfd->link.next, bfd_indx++)
6479 Elf_Internal_Shdr *symtab_hdr;
6481 Elf_Internal_Sym *local_syms = NULL;
6483 if (!is_arm_elf (input_bfd))
6485 if ((input_bfd->flags & DYNAMIC) != 0
6486 && (elf_sym_hashes (input_bfd) == NULL
6487 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6492 /* We'll need the symbol table in a second. */
6493 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6494 if (symtab_hdr->sh_info == 0)
6497 /* Limit scan of symbols to object file whose profile is
6498 Microcontroller to not hinder performance in the general case. */
6499 if (m_profile && first_veneer_scan)
6501 struct elf_link_hash_entry **sym_hashes;
6503 sym_hashes = elf_sym_hashes (input_bfd);
6504 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6505 &cmse_stub_created))
6506 goto error_ret_free_local;
6508 if (cmse_stub_created != 0)
6509 stub_changed = TRUE;
6512 /* Walk over each section attached to the input bfd. */
6513 for (section = input_bfd->sections;
6515 section = section->next)
6517 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6519 /* If there aren't any relocs, then there's nothing more
6521 if ((section->flags & SEC_RELOC) == 0
6522 || section->reloc_count == 0
6523 || (section->flags & SEC_CODE) == 0)
6526 /* If this section is a link-once section that will be
6527 discarded, then don't create any stubs. */
6528 if (section->output_section == NULL
6529 || section->output_section->owner != output_bfd)
6532 /* Get the relocs. */
6534 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6535 NULL, info->keep_memory)
6536 if (internal_relocs == NULL)
6537 goto error_ret_free_local;
6539 /* Now examine each relocation. */
6540 irela = internal_relocs;
6541 irelaend = irela + section->reloc_count;
6542 for (; irela < irelaend; irela++)
6544 unsigned int r_type, r_indx;
6547 bfd_vma destination;
6548 struct elf32_arm_link_hash_entry *hash;
6549 const char *sym_name;
6550 unsigned char st_type;
6551 enum arm_st_branch_type branch_type;
6552 bfd_boolean created_stub = FALSE;
6554 r_type = ELF32_R_TYPE (irela->r_info);
6555 r_indx = ELF32_R_SYM (irela->r_info);
/* A reloc type at or beyond R_ARM_max means corrupt input: bail out
   through the shared cleanup paths below (also used as goto targets
   by later error cases).  */
6557 if (r_type >= (unsigned int) R_ARM_max)
6559 bfd_set_error (bfd_error_bad_value);
6560 error_ret_free_internal:
6561 if (elf_section_data (section)->relocs == NULL)
6562 free (internal_relocs);
6564 error_ret_free_local:
6565 if (symtab_hdr->contents != (unsigned char *) local_syms)
/* Global symbols follow the sh_info locals in the hash array.  */
6571 if (r_indx >= symtab_hdr->sh_info)
6572 hash = elf32_arm_hash_entry
6573 (elf_sym_hashes (input_bfd)
6574 [r_indx - symtab_hdr->sh_info]);
6576 /* Only look for stubs on branch instructions, or
6577 non-relaxed TLSCALL */
6578 if ((r_type != (unsigned int) R_ARM_CALL)
6579 && (r_type != (unsigned int) R_ARM_THM_CALL)
6580 && (r_type != (unsigned int) R_ARM_JUMP24)
6581 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6582 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6583 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6584 && (r_type != (unsigned int) R_ARM_PLT32)
6585 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6586 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6587 && r_type == elf32_arm_tls_transition
6588 (info, r_type, &hash->root)
6589 && ((hash ? hash->tls_type
6590 : (elf32_arm_local_got_tls_type
6591 (input_bfd)[r_indx]))
6592 & GOT_TLS_GDESC) != 0))
6595 /* Now determine the call target, its name, value,
6602 if (r_type == (unsigned int) R_ARM_TLS_CALL
6603 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6605 /* A non-relaxed TLS call. The target is the
6606 plt-resident trampoline and nothing to do
6608 BFD_ASSERT (htab->tls_trampoline > 0);
6609 sym_sec = htab->root.splt;
6610 sym_value = htab->tls_trampoline;
6613 branch_type = ST_BRANCH_TO_ARM;
6617 /* It's a local symbol. */
6618 Elf_Internal_Sym *sym;
6620 if (local_syms == NULL)
6623 = (Elf_Internal_Sym *) symtab_hdr->contents;
6624 if (local_syms == NULL)
6626 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6627 symtab_hdr->sh_info, 0,
6629 if (local_syms == NULL)
6630 goto error_ret_free_internal;
6633 sym = local_syms + r_indx;
6634 if (sym->st_shndx == SHN_UNDEF)
6635 sym_sec = bfd_und_section_ptr;
6636 else if (sym->st_shndx == SHN_ABS)
6637 sym_sec = bfd_abs_section_ptr;
6638 else if (sym->st_shndx == SHN_COMMON)
6639 sym_sec = bfd_com_section_ptr;
6642 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6645 /* This is an undefined symbol. It can never
6649 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6650 sym_value = sym->st_value;
6651 destination = (sym_value + irela->r_addend
6652 + sym_sec->output_offset
6653 + sym_sec->output_section->vma);
6654 st_type = ELF_ST_TYPE (sym->st_info);
6656 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6658 = bfd_elf_string_from_elf_section (input_bfd,
6659 symtab_hdr->sh_link,
6664 /* It's an external symbol. */
/* Follow indirect/warning links to the real symbol.  */
6665 while (hash->root.root.type == bfd_link_hash_indirect
6666 || hash->root.root.type == bfd_link_hash_warning)
6667 hash = ((struct elf32_arm_link_hash_entry *)
6668 hash->root.root.u.i.link);
6670 if (hash->root.root.type == bfd_link_hash_defined
6671 || hash->root.root.type == bfd_link_hash_defweak)
6673 sym_sec = hash->root.root.u.def.section;
6674 sym_value = hash->root.root.u.def.value;
6676 struct elf32_arm_link_hash_table *globals =
6677 elf32_arm_hash_table (info);
6679 /* For a destination in a shared library,
6680 use the PLT stub as target address to
6681 decide whether a branch stub is
6684 && globals->root.splt != NULL
6686 && hash->root.plt.offset != (bfd_vma) -1)
6688 sym_sec = globals->root.splt;
6689 sym_value = hash->root.plt.offset;
6690 if (sym_sec->output_section != NULL)
6691 destination = (sym_value
6692 + sym_sec->output_offset
6693 + sym_sec->output_section->vma);
6695 else if (sym_sec->output_section != NULL)
6696 destination = (sym_value + irela->r_addend
6697 + sym_sec->output_offset
6698 + sym_sec->output_section->vma);
6700 else if ((hash->root.root.type == bfd_link_hash_undefined)
6701 || (hash->root.root.type == bfd_link_hash_undefweak))
6703 /* For a shared library, use the PLT stub as
6704 target address to decide whether a long
6705 branch stub is needed.
6706 For absolute code, they cannot be handled. */
6707 struct elf32_arm_link_hash_table *globals =
6708 elf32_arm_hash_table (info);
6711 && globals->root.splt != NULL
6713 && hash->root.plt.offset != (bfd_vma) -1)
6715 sym_sec = globals->root.splt;
6716 sym_value = hash->root.plt.offset;
6717 if (sym_sec->output_section != NULL)
6718 destination = (sym_value
6719 + sym_sec->output_offset
6720 + sym_sec->output_section->vma);
6727 bfd_set_error (bfd_error_bad_value);
6728 goto error_ret_free_internal;
6730 st_type = hash->root.type;
6732 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6733 sym_name = hash->root.root.root.string;
6738 bfd_boolean new_stub;
6739 struct elf32_arm_stub_hash_entry *stub_entry;
6741 /* Determine what (if any) linker stub is needed. */
6742 stub_type = arm_type_of_stub (info, section, irela,
6743 st_type, &branch_type,
6744 hash, destination, sym_sec,
6745 input_bfd, sym_name);
6746 if (stub_type == arm_stub_none)
6749 /* We've either created a stub for this reloc already,
6750 or we are about to. */
6752 elf32_arm_create_stub (htab, stub_type, section, irela,
6754 (char *) sym_name, sym_value,
6755 branch_type, &new_stub);
6757 created_stub = stub_entry != NULL;
6759 goto error_ret_free_internal;
6763 stub_changed = TRUE;
6767 /* Look for relocations which might trigger Cortex-A8
6769 if (htab->fix_cortex_a8
6770 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6771 || r_type == (unsigned int) R_ARM_THM_JUMP19
6772 || r_type == (unsigned int) R_ARM_THM_CALL
6773 || r_type == (unsigned int) R_ARM_THM_XPC22))
6775 bfd_vma from = section->output_section->vma
6776 + section->output_offset
/* Only branches whose first half-word sits at offset 0xffe within a
   4K page (i.e. straddling a page boundary) are candidates.  */
6779 if ((from & 0xfff) == 0xffe)
6781 /* Found a candidate. Note we haven't checked the
6782 destination is within 4K here: if we do so (and
6783 don't create an entry in a8_relocs) we can't tell
6784 that a branch should have been relocated when
6786 if (num_a8_relocs == a8_reloc_table_size)
6788 a8_reloc_table_size *= 2;
6789 a8_relocs = (struct a8_erratum_reloc *)
6790 bfd_realloc (a8_relocs,
6791 sizeof (struct a8_erratum_reloc)
6792 * a8_reloc_table_size);
6795 a8_relocs[num_a8_relocs].from = from;
6796 a8_relocs[num_a8_relocs].destination = destination;
6797 a8_relocs[num_a8_relocs].r_type = r_type;
6798 a8_relocs[num_a8_relocs].branch_type = branch_type;
6799 a8_relocs[num_a8_relocs].sym_name = sym_name;
6800 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6801 a8_relocs[num_a8_relocs].hash = hash;
6808 /* We're done with the internal relocs, free them. */
6809 if (elf_section_data (section)->relocs == NULL)
6810 free (internal_relocs);
6813 if (htab->fix_cortex_a8)
6815 /* Sort relocs which might apply to Cortex-A8 erratum. */
6816 qsort (a8_relocs, num_a8_relocs,
6817 sizeof (struct a8_erratum_reloc),
6820 /* Scan for branches which might trigger Cortex-A8 erratum. */
6821 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6822 &num_a8_fixes, &a8_fix_table_size,
6823 a8_relocs, num_a8_relocs,
6824 prev_num_a8_fixes, &stub_changed)
6826 goto error_ret_free_local;
/* Either cache the local symbols in the section header or free them,
   depending on the keep_memory policy.  */
6829 if (local_syms != NULL
6830 && symtab_hdr->contents != (unsigned char *) local_syms)
6832 if (!info->keep_memory)
6835 symtab_hdr->contents = (unsigned char *) local_syms;
/* On the first pass only, pin SG veneer addresses from the input
   import library so they keep their previous-link addresses.  */
6839 if (first_veneer_scan
6840 && !set_cmse_veneer_addr_from_implib (info, htab,
6841 &cmse_stub_created)
6844 if (prev_num_a8_fixes != num_a8_fixes)
6845 stub_changed = TRUE;
6850 /* OK, we've added some stubs. Find out the new size of the
6852 for (stub_sec = htab->stub_bfd->sections;
6854 stub_sec = stub_sec->next)
6856 /* Ignore non-stub sections. */
6857 if (!strstr (stub_sec->name, STUB_SUFFIX))
6863 /* Add new SG veneers after those already in the input import
6865 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6868 bfd_vma *start_offset_p;
6869 asection **stub_sec_p;
6871 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6872 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
/* A NULL start-offset pointer means this stub type has no notion of a
   reserved leading area; skip it.  */
6873 if (start_offset_p == NULL)
6876 BFD_ASSERT (stub_sec_p != NULL);
6877 if (*stub_sec_p != NULL)
6878 (*stub_sec_p)->size = *start_offset_p;
6881 /* Compute stub section size, considering padding. */
6882 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6883 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6887 asection **stub_sec_p;
6889 padding = arm_dedicated_stub_section_padding (stub_type);
6890 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6891 /* Skip if no stub input section or no stub section padding
6893 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6895 /* Stub section padding required but no dedicated section. */
6896 BFD_ASSERT (stub_sec_p);
/* Round the section size up to the required padding multiple.  */
6898 size = (*stub_sec_p)->size;
6899 size = (size + padding - 1) & ~(padding - 1);
6900 (*stub_sec_p)->size = size;
6903 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6904 if (htab->fix_cortex_a8)
6905 for (i = 0; i < num_a8_fixes; i++)
6907 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6908 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6910 if (stub_sec == NULL)
6914 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6919 /* Ask the linker to do its stuff. */
6920 (*htab->layout_sections_again) ();
6921 first_veneer_scan = FALSE;
6924 /* Add stubs for Cortex-A8 erratum fixes now. */
6925 if (htab->fix_cortex_a8)
6927 for (i = 0; i < num_a8_fixes; i++)
6929 struct elf32_arm_stub_hash_entry *stub_entry;
6930 char *stub_name = a8_fixes[i].stub_name;
6931 asection *section = a8_fixes[i].section;
6932 unsigned int section_id = a8_fixes[i].section->id;
6933 asection *link_sec = htab->stub_group[section_id].link_sec;
6934 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6935 const insn_sequence *template_sequence;
6936 int template_size, size = 0;
6938 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6940 if (stub_entry == NULL)
6942 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6943 section->owner, stub_name);
/* Populate the stub entry from the recorded fix; the real offset is
   assigned later (marked unset here with (bfd_vma) -1).  */
6947 stub_entry->stub_sec = stub_sec;
6948 stub_entry->stub_offset = (bfd_vma) -1;
6949 stub_entry->id_sec = link_sec;
6950 stub_entry->stub_type = a8_fixes[i].stub_type;
6951 stub_entry->source_value = a8_fixes[i].offset;
6952 stub_entry->target_section = a8_fixes[i].section;
6953 stub_entry->target_value = a8_fixes[i].target_offset;
6954 stub_entry->orig_insn = a8_fixes[i].orig_insn;
6955 stub_entry->branch_type = a8_fixes[i].branch_type;
6957 size = find_stub_size_and_template (a8_fixes[i].stub_type,
6961 stub_entry->stub_size = size;
6962 stub_entry->stub_template = template_sequence;
6963 stub_entry->stub_template_size = template_size;
6966 /* Stash the Cortex-A8 erratum fix array for use later in
6967 elf32_arm_write_section(). */
6968 htab->a8_erratum_fixes = a8_fixes;
6969 htab->num_a8_erratum_fixes = num_a8_fixes;
6973 htab->a8_erratum_fixes = NULL;
6974 htab->num_a8_erratum_fixes = 0;
6979 /* Build all the stubs associated with the current output file. The
6980 stubs are kept in a hash table attached to the main linker hash
6981 table. We also set up the .plt entries for statically linked PIC
6982 functions here. This function is called via arm_elf_finish in the
6986 elf32_arm_build_stubs (struct bfd_link_info *info)
6989 struct bfd_hash_table *table;
6990 enum elf32_arm_stub_type stub_type;
6991 struct elf32_arm_link_hash_table *htab;
6993 htab = elf32_arm_hash_table (info);
6997 for (stub_sec = htab->stub_bfd->sections;
6999 stub_sec = stub_sec->next)
7003 /* Ignore non-stub sections. */
7004 if (!strstr (stub_sec->name, STUB_SUFFIX))
7007 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7008 must at least be done for stub section requiring padding and for SG
7009 veneers to ensure that a non secure code branching to a removed SG
7010 veneer causes an error. */
7011 size = stub_sec->size;
7012 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7013 if (stub_sec->contents == NULL && size != 0)
7019 /* Add new SG veneers after those already in the input import library. */
7020 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7022 bfd_vma *start_offset_p;
7023 asection **stub_sec_p;
7025 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7026 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7027 if (start_offset_p == NULL)
7030 BFD_ASSERT (stub_sec_p != NULL);
7031 if (*stub_sec_p != NULL)
7032 (*stub_sec_p)->size = *start_offset_p;
7035 /* Build the stubs as directed by the stub hash table. */
7036 table = &htab->stub_hash_table;
7037 bfd_hash_traverse (table, arm_build_one_stub, info);
7038 if (htab->fix_cortex_a8)
7040 /* Place the cortex a8 stubs last. */
7041 htab->fix_cortex_a8 = -1;
7042 bfd_hash_traverse (table, arm_build_one_stub, info);
7048 /* Locate the Thumb encoded calling stub for NAME. */
7050 static struct elf_link_hash_entry *
7051 find_thumb_glue (struct bfd_link_info *link_info,
7053 char **error_message)
7056 struct elf_link_hash_entry *hash;
7057 struct elf32_arm_link_hash_table *hash_table;
7059 /* We need a pointer to the armelf specific hash table. */
7060 hash_table = elf32_arm_hash_table (link_info);
7061 if (hash_table == NULL)
7064 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7065 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7067 BFD_ASSERT (tmp_name);
7069 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7071 hash = elf_link_hash_lookup
7072 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7075 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7076 "Thumb", tmp_name, name) == -1)
7077 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7084 /* Locate the ARM encoded calling stub for NAME. */
7086 static struct elf_link_hash_entry *
7087 find_arm_glue (struct bfd_link_info *link_info,
7089 char **error_message)
7092 struct elf_link_hash_entry *myh;
7093 struct elf32_arm_link_hash_table *hash_table;
7095 /* We need a pointer to the elfarm specific hash table. */
7096 hash_table = elf32_arm_hash_table (link_info);
7097 if (hash_table == NULL)
7100 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7101 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7102 BFD_ASSERT (tmp_name);
7104 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7106 myh = elf_link_hash_lookup
7107 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7110 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7111 "ARM", tmp_name, name) == -1)
7112 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
/* ARM->Thumb glue (static images):

     .arm
   __func_from_arm:
     ldr r12, __func_addr
     bx  r12
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
     .arm
   __func_from_arm:
     ldr pc, __func_addr
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
     .arm
   __func_from_arm:
     ldr r12, __func_offset
     add r12, r12, pc
     bx  r12
   __func_offset:
     .word func - .  */

/* Encodings for the three ARM->Thumb stub flavours above.  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;

/* Thumb->ARM:  Thumb->(non-interworking aware) ARM

     .thumb                         .thumb
     .align 2                       .align 2
   __func_from_thumb:             __func_from_thumb:
     bx pc                          push {r6, lr}
     nop                            ldr  r6, __func_addr
     .arm                           mov  lr, pc
   __func_change_to_ARM:            bx   r6
     b func                         .arm
				  __func_back_to_thumb:
				    ldmia r13! {r6, lr}
				    bx    lr
				  __func_addr:
				    .word func  */

/* Encodings for the Thumb->ARM stub: bx pc / nop / b <func>.  */
#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;
static const insn16 t2a2_noop_insn = 0x46c0;
static const insn32 t2a3_b_insn = 0xea000000;

/* Sizes of the erratum-workaround veneers emitted elsewhere.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer: tst rN, #1 / moveq pc, lr / bx rN.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;
static const insn32 armbx2_moveq_insn = 0x01a0f000;
static const insn32 armbx3_bx_insn = 0xe12fff10;
7188 #ifndef ELFARM_NABI_C_INCLUDED
7190 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7193 bfd_byte * contents;
7197 /* Do not include empty glue sections in the output. */
7200 s = bfd_get_linker_section (abfd, name);
7202 s->flags |= SEC_EXCLUDE;
7207 BFD_ASSERT (abfd != NULL);
7209 s = bfd_get_linker_section (abfd, name);
7210 BFD_ASSERT (s != NULL);
7212 contents = (bfd_byte *) bfd_zalloc (abfd, size);
7214 BFD_ASSERT (s->size == size);
7215 s->contents = contents;
7219 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7221 struct elf32_arm_link_hash_table * globals;
7223 globals = elf32_arm_hash_table (info);
7224 BFD_ASSERT (globals != NULL);
7226 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7227 globals->arm_glue_size,
7228 ARM2THUMB_GLUE_SECTION_NAME);
7230 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7231 globals->thumb_glue_size,
7232 THUMB2ARM_GLUE_SECTION_NAME);
7234 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7235 globals->vfp11_erratum_glue_size,
7236 VFP11_ERRATUM_VENEER_SECTION_NAME);
7238 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7239 globals->stm32l4xx_erratum_glue_size,
7240 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7242 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7243 globals->bx_glue_size,
7244 ARM_BX_GLUE_SECTION_NAME);
7249 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7250 returns the symbol identifying the stub. */
7252 static struct elf_link_hash_entry *
7253 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7254 struct elf_link_hash_entry * h)
7256 const char * name = h->root.root.string;
7259 struct elf_link_hash_entry * myh;
7260 struct bfd_link_hash_entry * bh;
7261 struct elf32_arm_link_hash_table * globals;
7265 globals = elf32_arm_hash_table (link_info);
7266 BFD_ASSERT (globals != NULL);
7267 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7269 s = bfd_get_linker_section
7270 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7272 BFD_ASSERT (s != NULL);
7274 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7275 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7276 BFD_ASSERT (tmp_name);
7278 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7280 myh = elf_link_hash_lookup
7281 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7285 /* We've already seen this guy. */
7290 /* The only trick here is using hash_table->arm_glue_size as the value.
7291 Even though the section isn't allocated yet, this is where we will be
7292 putting it. The +1 on the value marks that the stub has not been
7293 output yet - not that it is a Thumb function. */
7295 val = globals->arm_glue_size + 1;
7296 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7297 tmp_name, BSF_GLOBAL, s, val,
7298 NULL, TRUE, FALSE, &bh);
7300 myh = (struct elf_link_hash_entry *) bh;
7301 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7302 myh->forced_local = 1;
7306 if (bfd_link_pic (link_info)
7307 || globals->root.is_relocatable_executable
7308 || globals->pic_veneer)
7309 size = ARM2THUMB_PIC_GLUE_SIZE;
7310 else if (globals->use_blx)
7311 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7313 size = ARM2THUMB_STATIC_GLUE_SIZE;
7316 globals->arm_glue_size += size;
7321 /* Allocate space for ARMv4 BX veneers. */
7324 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7327 struct elf32_arm_link_hash_table *globals;
7329 struct elf_link_hash_entry *myh;
7330 struct bfd_link_hash_entry *bh;
7333 /* BX PC does not need a veneer. */
7337 globals = elf32_arm_hash_table (link_info);
7338 BFD_ASSERT (globals != NULL);
7339 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7341 /* Check if this veneer has already been allocated. */
7342 if (globals->bx_glue_offset[reg])
7345 s = bfd_get_linker_section
7346 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7348 BFD_ASSERT (s != NULL);
7350 /* Add symbol for veneer. */
7352 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7353 BFD_ASSERT (tmp_name);
7355 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7357 myh = elf_link_hash_lookup
7358 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
7360 BFD_ASSERT (myh == NULL);
7363 val = globals->bx_glue_size;
7364 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7365 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7366 NULL, TRUE, FALSE, &bh);
7368 myh = (struct elf_link_hash_entry *) bh;
7369 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7370 myh->forced_local = 1;
7372 s->size += ARM_BX_VENEER_SIZE;
7373 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7374 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7378 /* Add an entry to the code/data map for section SEC. */
7381 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7383 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7384 unsigned int newidx;
7386 if (sec_data->map == NULL)
7388 sec_data->map = (elf32_arm_section_map *)
7389 bfd_malloc (sizeof (elf32_arm_section_map));
7390 sec_data->mapcount = 0;
7391 sec_data->mapsize = 1;
7394 newidx = sec_data->mapcount++;
7396 if (sec_data->mapcount > sec_data->mapsize)
7398 sec_data->mapsize *= 2;
7399 sec_data->map = (elf32_arm_section_map *)
7400 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7401 * sizeof (elf32_arm_section_map));
7406 sec_data->map[newidx].vma = vma;
7407 sec_data->map[newidx].type = type;
/* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
   veneers are handled for now.

   BRANCH is the erratum record for the offending instruction;
   BRANCH_SEC/OFFSET locate it.  A veneer symbol is entered in the glue
   owner's hash table and linked back to BRANCH via u.b.veneer.

   NOTE(review): this chunk appears to have lines elided (some
   declarations, braces and return statements are missing); the
   comments below annotate the visible code only.  */

record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
			     elf32_vfp11_erratum_list *branch,
			     asection *branch_sec,
			     unsigned int offset)
  struct elf32_arm_link_hash_table *hash_table;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  struct _arm_elf_section_data *sec_data;
  elf32_vfp11_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);

  sec_data = elf32_arm_section_data (s);

  BFD_ASSERT (s != NULL);

  /* +10 leaves room for the decimal veneer counter in the name.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_vfp11_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  /* A veneer with this id must not already exist.  */
  BFD_ASSERT (myh == NULL);

  /* The veneer's value is its offset within the glue section.  */
  val = hash_table->vfp11_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->erratumcount += 1;
  newerr = (elf32_vfp11_erratum_list *)
      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));

  newerr->type = VFP11_ERRATUM_ARM_VENEER;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_vfp11_fixes;
  branch->u.b.veneer = newerr;

  /* Push onto the section's erratum list.  */
  newerr->next = sec_data->erratumlist;
  sec_data->erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_vfp11_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  /* The return symbol is placed at the branch site, not in the glue
     section.  */
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->vfp11_erratum_glue_size == 0)

      /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
	 ever requires this erratum fix.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$a",
					BSF_LOCAL, s, 0, NULL,

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 'a', 0);

  s->size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->num_vfp11_fixes++;

  /* The offset of the veneer.  */
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.

   BRANCH is the erratum record for the offending LDM/VLDM;
   BRANCH_SEC/OFFSET locate it; VENEER_SIZE is the size of the veneer to
   reserve (LDM vs VLDM flavours differ).

   NOTE(review): this chunk appears to have lines elided (some
   declarations, braces and return statements are missing); the
   comments below annotate the visible code only.  */

record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
  struct elf32_arm_link_hash_table *hash_table;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* +10 leaves room for the decimal veneer counter in the name.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  /* A veneer with this id must not already exist.  */
  BFD_ASSERT (myh == NULL);

  /* The veneer's value is its offset within the glue section.  */
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  /* Push onto the section's erratum list.  */
  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  /* The return symbol is placed at the branch site, not in the glue
     section.  */
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)

      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
7648 #define ARM_GLUE_SECTION_FLAGS \
7649 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7650 | SEC_READONLY | SEC_LINKER_CREATED)
7652 /* Create a fake section for use by the ARM backend of the linker. */
7655 arm_make_glue_section (bfd * abfd, const char * name)
7659 sec = bfd_get_linker_section (abfd, name);
7664 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7667 || !bfd_set_section_alignment (sec, 2))
7670 /* Set the gc mark to prevent the section from being removed by garbage
7671 collection, despite the fact that no relocs refer to this section. */
7677 /* Set size of .plt entries. This function is called from the
7678 linker scripts in ld/emultempl/{armelf}.em. */
7681 bfd_elf32_arm_use_long_plt (void)
7683 elf32_arm_use_long_plt_entry = TRUE;
7686 /* Add the glue sections to ABFD. This function is called from the
7687 linker scripts in ld/emultempl/{armelf}.em. */
7690 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7691 struct bfd_link_info *info)
7693 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7694 bfd_boolean dostm32l4xx = globals
7695 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7696 bfd_boolean addglue;
7698 /* If we are only performing a partial
7699 link do not bother adding the glue. */
7700 if (bfd_link_relocatable (info))
7703 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7704 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7705 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7706 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7712 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7715 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7716 ensures they are not marked for deletion by
7717 strip_excluded_output_sections () when veneers are going to be created
7718 later. Not doing so would trigger assert on empty section size in
7719 lang_size_sections_1 (). */
7722 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7724 enum elf32_arm_stub_type stub_type;
7726 /* If we are only performing a partial
7727 link do not bother adding the glue. */
7728 if (bfd_link_relocatable (info))
7731 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7734 const char *out_sec_name;
7736 if (!arm_dedicated_stub_output_section_required (stub_type))
7739 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7740 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7741 if (out_sec != NULL)
7742 out_sec->flags |= SEC_KEEP;
7746 /* Select a BFD to be used to hold the sections used by the glue code.
7747 This function is called from the linker scripts in ld/emultempl/
7751 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7753 struct elf32_arm_link_hash_table *globals;
7755 /* If we are only performing a partial link
7756 do not bother getting a bfd to hold the glue. */
7757 if (bfd_link_relocatable (info))
7760 /* Make sure we don't attach the glue sections to a dynamic object. */
7761 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7763 globals = elf32_arm_hash_table (info);
7764 BFD_ASSERT (globals != NULL);
7766 if (globals->bfd_of_glue_owner != NULL)
7769 /* Save the bfd for later use. */
7770 globals->bfd_of_glue_owner = abfd;
7776 check_use_blx (struct elf32_arm_link_hash_table *globals)
7780 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7783 if (globals->fix_arm1176)
7785 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7786 globals->use_blx = 1;
7790 if (cpu_arch > TAG_CPU_ARCH_V4T)
7791 globals->use_blx = 1;
/* Scan ABFD's relocations before section sizes are fixed, recording the
   interworking glue (ARM<->Thumb stubs) and ARMv4 BX veneers that the
   final link will need.

   NOTE(review): this chunk appears to have lines elided (the return
   type, several braces, continue/return statements and the error path
   are missing); the comments below annotate the visible code only.  */

bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  /* Byte-swapped (BE8) code is only meaningful for big-endian input.  */
  if (globals->byteswap_code && !bfd_big_endian (abfd))
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  for (; sec != NULL; sec = sec->next)
      /* Sections without relocs or excluded from output need no glue.  */
      if (sec->reloc_count == 0)

      if ((sec->flags & SEC_EXCLUDE) != 0)

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	  unsigned long r_index;
	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if (	r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;

		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))

	  if (r_type == R_ARM_V4BX)
	      /* Low nibble of a BX instruction is the target register.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);

	  /* If the relocation is not against a symbol it cannot concern us.  */

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)

	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);

      /* Only free contents/relocs we own (not the cached copies).  */
      if (elf_section_data (sec)->this_hdr.contents != contents)

      if (elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;

  /* Error path: release whatever was not cached.  */
  if (elf_section_data (sec)->this_hdr.contents != contents)

  if (elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);
7954 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7957 bfd_elf32_arm_init_maps (bfd *abfd)
7959 Elf_Internal_Sym *isymbuf;
7960 Elf_Internal_Shdr *hdr;
7961 unsigned int i, localsyms;
7963 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7964 if (! is_arm_elf (abfd))
7967 if ((abfd->flags & DYNAMIC) != 0)
7970 hdr = & elf_symtab_hdr (abfd);
7971 localsyms = hdr->sh_info;
7973 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
7974 should contain the number of local symbols, which should come before any
7975 global symbols. Mapping symbols are always local. */
7976 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
7979 /* No internal symbols read? Skip this BFD. */
7980 if (isymbuf == NULL)
7983 for (i = 0; i < localsyms; i++)
7985 Elf_Internal_Sym *isym = &isymbuf[i];
7986 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
7990 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
7992 name = bfd_elf_string_from_elf_section (abfd,
7993 hdr->sh_link, isym->st_name);
7995 if (bfd_is_arm_special_symbol_name (name,
7996 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
7997 elf32_arm_section_map_add (sec, name[1], isym->st_value);
8003 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8004 say what they wanted. */
8007 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8009 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8010 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8012 if (globals == NULL)
8015 if (globals->fix_cortex_a8 == -1)
8017 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8018 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8019 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8020 || out_attr[Tag_CPU_arch_profile].i == 0))
8021 globals->fix_cortex_a8 = 1;
8023 globals->fix_cortex_a8 = 0;
8029 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8031 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8032 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8034 if (globals == NULL)
8036 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8037 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8039 switch (globals->vfp11_fix)
8041 case BFD_ARM_VFP11_FIX_DEFAULT:
8042 case BFD_ARM_VFP11_FIX_NONE:
8043 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8047 /* Give a warning, but do as the user requests anyway. */
8048 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8049 "workaround is not necessary for target architecture"), obfd);
8052 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8053 /* For earlier architectures, we might need the workaround, but do not
8054 enable it by default. If users is running with broken hardware, they
8055 must enable the erratum fix explicitly. */
8056 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8060 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8062 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8063 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8065 if (globals == NULL)
8068 /* We assume only Cortex-M4 may require the fix. */
8069 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8070 || out_attr[Tag_CPU_arch_profile].i != 'M')
8072 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8073 /* Give a warning, but do as the user requests anyway. */
8075 (_("%pB: warning: selected STM32L4XX erratum "
8076 "workaround is not necessary for target architecture"), obfd);
/* Classification of the VFP11 pipeline an instruction uses, for the
   denorm-erratum scan.  VFP11_BAD marks instructions that are not
   VFP data-processing instructions of interest.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,
  VFP11_LS,
  VFP11_DS,
  VFP11_BAD
};
8088 /* Return a VFP register number. This is encoded as RX:X for single-precision
8089 registers, or X:RX for double-precision registers, where RX is the group of
8090 four bits in the instruction encoding and X is the single extension bit.
8091 RX and X fields are specified using their lowest (starting) bit. The return
8094 0...31: single-precision registers s0...s31
8095 32...63: double-precision registers d0...d31.
8097 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8098 encounter VFP3 instructions, so we allow the full range for DP registers. */
8101 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8105 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8107 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31 (i.e. REG >= 48).  A
   single-precision register sets one bit; a double-precision register
   sets the pair of bits for its two overlapping SP registers.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1 << reg;
  else if (reg < 48)
    *wmask |= 3 << ((reg - 32) * 2);
}
8122 /* Return TRUE if WMASK overwrites anything in REGS. */
8125 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8129 for (i = 0; i < numregs; i++)
8131 unsigned int reg = regs[i];
8133 if (reg < 32 && (wmask & (1 << reg)) != 0)
8141 if ((wmask & (3 << (reg * 2))) != 0)
8148 /* In this function, we're interested in two things: finding input registers
8149 for VFP data-processing instructions, and finding the set of registers which
8150 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8151 hold the written set, so FLDM etc. are easy to deal with (we're only
8152 interested in 32 SP registers or 16 dp registers, due to the VFP version
8153 implemented by the chip in question). DP registers are marked by setting
8154 both SP registers in the write mask). */

/* Classify INSN as a bfd_arm_vfp11_pipe value (VFP11_BAD when it is not a
   VFP instruction of interest).  Registers written by INSN are ORed into
   *DESTMASK (encoded as by bfd_arm_vfp11_write_mask); input registers of
   FMAC-pipeline instructions are stored in REGS.
   NOTE(review): the final parameter (presumably the count of entries
   written to REGS) is not visible in this extract -- confirm against the
   full source.  */
8156 static enum bfd_arm_vfp11_pipe
8157 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8160 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
/* Bits 11:8 == 0xb select double precision operands.  */
8161 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
8163 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8166 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8167 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
/* Gather the p, q, r and s opcode bits into a single selector value.  */
8169 pqrs = ((insn & 0x00800000) >> 20)
8170 | ((insn & 0x00300000) >> 19)
8171 | ((insn & 0x00000040) >> 6);
8175 case 0: /* fmac[sd]. */
8176 case 1: /* fnmac[sd]. */
8177 case 2: /* fmsc[sd]. */
8178 case 3: /* fnmsc[sd]. */
8180 bfd_arm_vfp11_write_mask (destmask, fd)
8182 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8187 case 4: /* fmul[sd]. */
8188 case 5: /* fnmul[sd]. */
8189 case 6: /* fadd[sd]. */
8190 case 7: /* fsub[sd]. */
8194 case 8: /* fdiv[sd]. */
8197 bfd_arm_vfp11_write_mask (destmask, fd);
8198 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8203 case 15: /* extended opcode. */
/* Extended opcodes encode a secondary selector in Fn and the N bit.  */
8205 unsigned int extn = ((insn >> 15) & 0x1e)
8206 | ((insn >> 7) & 1);
8210 case 0: /* fcpy[sd]. */
8211 case 1: /* fabs[sd]. */
8212 case 2: /* fneg[sd]. */
8213 case 8: /* fcmp[sd]. */
8214 case 9: /* fcmpe[sd]. */
8215 case 10: /* fcmpz[sd]. */
8216 case 11: /* fcmpez[sd]. */
8217 case 16: /* fuito[sd]. */
8218 case 17: /* fsito[sd]. */
8219 case 24: /* ftoui[sd]. */
8220 case 25: /* ftouiz[sd]. */
8221 case 26: /* ftosi[sd]. */
8222 case 27: /* ftosiz[sd]. */
8223 /* These instructions will not bounce due to underflow. */
8228 case 3: /* fsqrt[sd]. */
8229 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8230 registers to cause the erratum in previous instructions. */
8231 bfd_arm_vfp11_write_mask (destmask, fd);
8235 case 15: /* fcvt{ds,sd}. */
8239 bfd_arm_vfp11_write_mask (destmask, fd);
8241 /* Only FCVTSD can underflow. */
8242 if ((insn & 0x100) != 0)
8261 /* Two-register transfer. */
8262 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8264 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
/* Bit 20 is L: clear means the transfer writes VFP registers.  */
8266 if ((insn & 0x100000) == 0)
8269 bfd_arm_vfp11_write_mask (destmask, fm);
8272 bfd_arm_vfp11_write_mask (destmask, fm);
8273 bfd_arm_vfp11_write_mask (destmask, fm + 1);
8279 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
8281 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
/* P, U, W addressing-mode bits select the load variant below.  */
8282 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8286 case 0: /* Two-reg transfer. We should catch these above. */
8289 case 2: /* fldm[sdx]. */
8293 unsigned int i, offset = insn & 0xff;
/* Mark every register in the load-multiple list as written.  */
8298 for (i = fd; i < fd + offset; i++)
8299 bfd_arm_vfp11_write_mask (destmask, i);
8303 case 4: /* fld[sd]. */
8305 bfd_arm_vfp11_write_mask (destmask, fd);
8314 /* Single-register transfer. Note L==0. */
8315 else if ((insn & 0x0f100e10) == 0x0e000a10)
8317 unsigned int opcode = (insn >> 21) & 7;
8318 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8322 case 0: /* fmsr/fmdlr. */
8323 case 1: /* fmdhr. */
8324 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8325 destination register. I don't know if this is exactly right,
8326 but it is the conservative choice. */
8327 bfd_arm_vfp11_write_mask (destmask, fn);
8341 static int elf32_arm_compare_mapping (const void * a, const void * b);
8344 /* Look for potentially-troublesome code sequences which might trigger the
8345 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8346 (available from ARM) for details of the erratum. A short version is
8347 described in ld.texinfo. */

/* Scan the executable ARM-mode sections of ABFD with a small state machine
   and queue a VFP11_ERRATUM_BRANCH_TO_ARM_VENEER record on each section
   where an anti-dependent VFP11 instruction pair is found.
   NOTE(review): `globals->vfp11_fix` is dereferenced in the `use_vector`
   initializer BEFORE the `globals == NULL` guard below, so that guard can
   never protect the first access -- verify against the full source whether
   the initializer should be moved after the check.  */
8350 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8353 bfd_byte *contents = NULL;
8355 int regs[3], numregs = 0;
8356 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8357 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8359 if (globals == NULL)
8362 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8363 The states transition as follows:
8365 0 -> 1 (vector) or 0 -> 2 (scalar)
8366 A VFP FMAC-pipeline instruction has been seen. Fill
8367 regs[0]..regs[numregs-1] with its input operands. Remember this
8368 instruction in 'first_fmac'.
8371 Any instruction, except for a VFP instruction which overwrites
8376 A VFP instruction has been seen which overwrites any of regs[*].
8377 We must make a veneer! Reset state to 0 before examining next
8381 If we fail to match anything in state 2, reset to state 0 and reset
8382 the instruction pointer to the instruction after 'first_fmac'.
8384 If the VFP11 vector mode is in use, there must be at least two unrelated
8385 instructions between anti-dependent VFP11 instructions to properly avoid
8386 triggering the erratum, hence the use of the extra state 1. */
8388 /* If we are only performing a partial link do not bother
8389 to construct any glue. */
8390 if (bfd_link_relocatable (link_info))
8393 /* Skip if this bfd does not correspond to an ELF image. */
8394 if (! is_arm_elf (abfd))
8397 /* We should have chosen a fix type by the time we get here. */
8398 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8400 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8403 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8404 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8407 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8409 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8410 struct _arm_elf_section_data *sec_data;
8412 /* If we don't have executable progbits, we're not interested in this
8413 section. Also skip if section is to be excluded. */
8414 if (elf_section_type (sec) != SHT_PROGBITS
8415 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8416 || (sec->flags & SEC_EXCLUDE) != 0
8417 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8418 || sec->output_section == bfd_abs_section_ptr
8419 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8422 sec_data = elf32_arm_section_data (sec)
8424 if (sec_data->mapcount == 0)
/* Use cached section contents when available; otherwise read them in.  */
8427 if (elf_section_data (sec)->this_hdr.contents != NULL)
8428 contents = elf_section_data (sec)->this_hdr.contents;
8429 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
/* Sort the ARM/Thumb/data mapping symbols so spans are in VMA order.  */
8432 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8433 elf32_arm_compare_mapping);
8435 for (span = 0; span < sec_data->mapcount; span++)
8437 unsigned int span_start = sec_data->map[span].vma;
8438 unsigned int span_end = (span == sec_data->mapcount - 1)
8439 ? sec->size : sec_data->map[span + 1].vma;
8440 char span_type = sec_data->map[span].type;
8442 /* FIXME: Only ARM mode is supported at present. We may need to
8443 support Thumb-2 mode also at some point. */
8444 if (span_type != 'a')
8447 for (i = span_start; i < span_end;)
8449 unsigned int next_i = i + 4;
/* Assemble the 32-bit ARM instruction honouring target endianness.  */
8450 unsigned int insn = bfd_big_endian (abfd)
8451 ? (((unsigned) contents[i] << 24)
8452 | (contents[i + 1] << 16)
8453 | (contents[i + 2] << 8)
8455 : (((unsigned) contents[i + 3] << 24)
8456 | (contents[i + 2] << 16)
8457 | (contents[i + 1] << 8)
8459 unsigned int writemask = 0;
8460 enum bfd_arm_vfp11_pipe vpipe;
8465 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8467 /* I'm assuming the VFP11 erratum can trigger with denorm
8468 operands on either the FMAC or the DS pipeline. This might
8469 lead to slightly overenthusiastic veneer insertion. */
8470 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8472 state = use_vector ? 1 : 2;
8474 veneer_of_insn = insn;
8480 int other_regs[3], other_numregs;
8481 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8484 if (vpipe != VFP11_BAD
8485 && bfd_arm_vfp11_antidependency (writemask, regs,
8495 int other_regs[3], other_numregs;
8496 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8499 if (vpipe != VFP11_BAD
8500 && bfd_arm_vfp11_antidependency (writemask, regs,
/* No match in state 2: rewind to just after the first FMAC.  */
8506 next_i = first_fmac + 4;
8512 abort (); /* Should be unreachable. */
/* NOTE(review): the bfd_zmalloc result appears to be used without a NULL
   check in this extract -- confirm against the full source.  */
8517 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8518 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8520 elf32_arm_section_data (sec)->erratumcount += 1;
8522 newerr->u.b.vfp_insn = veneer_of_insn;
8527 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8534 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
/* Chain the new record onto this section's erratum list.  */
8539 newerr->next = sec_data->erratumlist;
8540 sec_data->erratumlist = newerr;
/* Release contents only if we allocated them ourselves above.  */
8549 if (elf_section_data (sec)->this_hdr.contents != contents)
8557 if (elf_section_data (sec)->this_hdr.contents != contents)
8563 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8564 after sections have been laid out, using specially-named symbols. */

/* For every VFP11 erratum record queued on a section of ABFD, look up the
   corresponding veneer (or "_r" return-location) symbol by name in the
   link hash table and store its final VMA back into the record.
   NOTE(review): tmp_name is sized strlen(pattern)+10; this assumes the
   printed veneer id never expands the name by more than that -- confirm.  */
8567 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8568 struct bfd_link_info *link_info)
8571 struct elf32_arm_link_hash_table *globals;
8574 if (bfd_link_relocatable (link_info))
8577 /* Skip if this bfd does not correspond to an ELF image. */
8578 if (! is_arm_elf (abfd))
8581 globals = elf32_arm_hash_table (link_info);
8582 if (globals == NULL)
8585 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8586 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8587 BFD_ASSERT (tmp_name);
8589 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8591 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8592 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8594 for (; errnode != NULL; errnode = errnode->next)
8596 struct elf_link_hash_entry *myh;
8599 switch (errnode->type)
8601 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8602 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8603 /* Find veneer symbol. */
8604 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8605 errnode->u.b.veneer->u.v.id);
8607 myh = elf_link_hash_lookup
8608 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8611 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8612 abfd, "VFP11", tmp_name);
/* Resolve the symbol's final output address.  */
8614 vma = myh->root.u.def.section->output_section->vma
8615 + myh->root.u.def.section->output_offset
8616 + myh->root.u.def.value;
8618 errnode->u.b.veneer->vma = vma;
8621 case VFP11_ERRATUM_ARM_VENEER:
8622 case VFP11_ERRATUM_THUMB_VENEER:
8623 /* Find return location. */
8624 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8627 myh = elf_link_hash_lookup
8628 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8631 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8632 abfd, "VFP11", tmp_name);
/* Resolve the return location's final output address.  */
8634 vma = myh->root.u.def.section->output_section->vma
8635 + myh->root.u.def.section->output_offset
8636 + myh->root.u.def.value;
8638 errnode->u.v.branch->vma = vma;
8650 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8651 return locations after sections have been laid out, using
8652 specially-named symbols. */

/* STM32L4XX analogue of bfd_elf32_arm_vfp11_fix_veneer_locations: resolve
   the veneer and "_r" return-location symbols for each queued erratum
   record and store their final VMAs back into the records.  */
8655 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8656 struct bfd_link_info *link_info)
8659 struct elf32_arm_link_hash_table *globals;
8662 if (bfd_link_relocatable (link_info))
8665 /* Skip if this bfd does not correspond to an ELF image. */
8666 if (! is_arm_elf (abfd))
8669 globals = elf32_arm_hash_table (link_info);
8670 if (globals == NULL)
8673 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8674 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8675 BFD_ASSERT (tmp_name);
8677 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8679 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8680 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8682 for (; errnode != NULL; errnode = errnode->next)
8684 struct elf_link_hash_entry *myh;
8687 switch (errnode->type)
8689 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8690 /* Find veneer symbol. */
8691 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8692 errnode->u.b.veneer->u.v.id);
8694 myh = elf_link_hash_lookup
8695 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8698 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8699 abfd, "STM32L4XX", tmp_name);
/* Resolve the veneer symbol's final output address.  */
8701 vma = myh->root.u.def.section->output_section->vma
8702 + myh->root.u.def.section->output_offset
8703 + myh->root.u.def.value;
8705 errnode->u.b.veneer->vma = vma;
8708 case STM32L4XX_ERRATUM_VENEER:
8709 /* Find return location. */
8710 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8713 myh = elf_link_hash_lookup
8714 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8717 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8718 abfd, "STM32L4XX", tmp_name);
/* Resolve the return location's final output address.  */
8720 vma = myh->root.u.def.section->output_section->vma
8721 + myh->root.u.def.section->output_offset
8722 + myh->root.u.def.value;
8724 errnode->u.v.branch->vma = vma;
8736 static inline bfd_boolean
8737 is_thumb2_ldmia (const insn32 insn)
8739 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8740 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8741 return (insn & 0xffd02000) == 0xe8900000;
8744 static inline bfd_boolean
8745 is_thumb2_ldmdb (const insn32 insn)
8747 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8748 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8749 return (insn & 0xffd02000) == 0xe9100000;
8752 static inline bfd_boolean
8753 is_thumb2_vldm (const insn32 insn)
8755 /* A6.5 Extension register load or store instruction
8757 We look for SP 32-bit and DP 64-bit registers.
8758 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8759 <list> is consecutive 64-bit registers
8760 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8761 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8762 <list> is consecutive 32-bit registers
8763 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8764 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8765 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8767 (((insn & 0xfe100f00) == 0xec100b00) ||
8768 ((insn & 0xfe100f00) == 0xec100a00))
8769 && /* (IA without !). */
8770 (((((insn << 7) >> 28) & 0xd) == 0x4)
8771 /* (IA with !), includes VPOP (when reg number is SP). */
8772 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8774 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8777 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8779 - computes the number and the mode of memory accesses
8780 - decides if the replacement should be done:
8781 . replaces only if > 8-word accesses
8782 . or (testing purposes only) replaces all accesses. */
8785 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8786 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8790 /* The field encoding the register list is the same for both LDMIA
8791 and LDMDB encodings. */
8792 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8793 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8794 else if (is_thumb2_vldm (insn))
8795 nb_words = (insn & 0xff);
8797 /* DEFAULT mode accounts for the real bug condition situation,
8798 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8800 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8801 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8804 /* Look for potentially-troublesome code sequences which might trigger
8805 the STM STM32L4XX erratum. */

/* Scan the executable Thumb-mode sections of ABFD for LDM/VLDM instructions
   that need an STM32L4XX replacement veneer, queueing an erratum record on
   each affected section; IT-block position is tracked so a multiple-load
   inside (but not last in) an IT block is reported as an error instead.  */
8808 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8809 struct bfd_link_info *link_info)
8812 bfd_byte *contents = NULL;
8813 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8815 if (globals == NULL)
8818 /* If we are only performing a partial link do not bother
8819 to construct any glue. */
8820 if (bfd_link_relocatable (link_info))
8823 /* Skip if this bfd does not correspond to an ELF image. */
8824 if (! is_arm_elf (abfd))
8827 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8830 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8831 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8834 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8836 unsigned int i, span;
8837 struct _arm_elf_section_data *sec_data;
8839 /* If we don't have executable progbits, we're not interested in this
8840 section. Also skip if section is to be excluded. */
8841 if (elf_section_type (sec) != SHT_PROGBITS
8842 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8843 || (sec->flags & SEC_EXCLUDE) != 0
8844 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8845 || sec->output_section == bfd_abs_section_ptr
8846 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8849 sec_data = elf32_arm_section_data (sec);
8851 if (sec_data->mapcount == 0)
/* Use cached section contents when available; otherwise read them in.  */
8854 if (elf_section_data (sec)->this_hdr.contents != NULL)
8855 contents = elf_section_data (sec)->this_hdr.contents;
8856 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
/* Sort the mapping symbols so spans are processed in VMA order.  */
8859 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8860 elf32_arm_compare_mapping);
8862 for (span = 0; span < sec_data->mapcount; span++)
8864 unsigned int span_start = sec_data->map[span].vma;
8865 unsigned int span_end = (span == sec_data->mapcount - 1)
8866 ? sec->size : sec_data->map[span + 1].vma;
8867 char span_type = sec_data->map[span].type;
8868 int itblock_current_pos = 0;
8870 /* Only Thumb2 mode need be supported with this CM4 specific
8871 code, we should not encounter any arm mode eg span_type
8873 if (span_type != 't')
8876 for (i = span_start; i < span_end;)
8878 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8879 bfd_boolean insn_32bit = FALSE;
8880 bfd_boolean is_ldm = FALSE;
8881 bfd_boolean is_vldm = FALSE;
8882 bfd_boolean is_not_last_in_it_block = FALSE;
8884 /* The first 16-bits of all 32-bit thumb2 instructions start
8885 with opcode[15..13]=0b111 and the encoded op1 can be anything
8886 except opcode[12..11]!=0b00.
8887 See 32-bit Thumb instruction encoding. */
8888 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8891 /* Compute the predicate that tells if the instruction
8892 is concerned by the IT block
8893 - Creates an error if there is a ldm that is not
8894 last in the IT block thus cannot be replaced
8895 - Otherwise we can create a branch at the end of the
8896 IT block, it will be controlled naturally by IT
8897 with the proper pseudo-predicate
8898 - So the only interesting predicate is the one that
8899 tells that we are not on the last item of an IT
8901 if (itblock_current_pos != 0)
8902 is_not_last_in_it_block = !!--itblock_current_pos;
8906 /* Load the rest of the insn (in manual-friendly order). */
8907 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8908 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8909 is_vldm = is_thumb2_vldm (insn);
8911 /* Veneers are created for (v)ldm depending on
8912 option flags and memory accesses conditions; but
8913 if the instruction is not the last instruction of
8914 an IT block, we cannot create a jump there, so we
8916 if ((is_ldm || is_vldm)
8917 && stm32l4xx_need_create_replacing_stub
8918 (insn, globals->stm32l4xx_fix))
8920 if (is_not_last_in_it_block)
8923 /* xgettext:c-format */
8924 (_("%pB(%pA+%#x): error: multiple load detected"
8925 " in non-last IT block instruction:"
8926 " STM32L4XX veneer cannot be generated; "
8927 "use gcc option -mrestrict-it to generate"
8928 " only one instruction per IT block"),
8933 elf32_stm32l4xx_erratum_list *newerr =
8934 (elf32_stm32l4xx_erratum_list *)
8936 (sizeof (elf32_stm32l4xx_erratum_list));
8938 elf32_arm_section_data (sec)
8939 ->stm32l4xx_erratumcount += 1;
8940 newerr->u.b.insn = insn;
8941 /* We create only thumb branches. */
8943 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8944 record_stm32l4xx_erratum_veneer
8945 (link_info, newerr, abfd, sec,
/* Veneer size depends on whether this is an LDM or a VLDM.  */
8948 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8949 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8951 newerr->next = sec_data->stm32l4xx_erratumlist;
8952 sec_data->stm32l4xx_erratumlist = newerr;
8959 IT blocks are only encoded in T1
8960 Encoding T1: IT{x{y{z}}} <firstcond>
8961 1 0 1 1 - 1 1 1 1 - firstcond - mask
8962 if mask = '0000' then see 'related encodings'
8963 We don't deal with UNPREDICTABLE, just ignore these.
8964 There can be no nested IT blocks so an IT block
8965 is naturally a new one for which it is worth
8966 computing its size. */
8967 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
8968 && ((insn & 0x000f) != 0x0000);
8969 /* If we have a new IT block we compute its size. */
8972 /* Compute the number of instructions controlled
8973 by the IT block, it will be used to decide
8974 whether we are inside an IT block or not. */
8975 unsigned int mask = insn & 0x000f;
8976 itblock_current_pos = 4 - ctz (mask);
/* Advance by the size of the instruction just examined.  */
8980 i += insn_32bit ? 4 : 2;
/* Release contents only if we allocated them ourselves above.  */
8984 if (elf_section_data (sec)->this_hdr.contents != contents)
8992 if (elf_section_data (sec)->this_hdr.contents != contents)
8998 /* Set target relocation values needed during linking. */
9001 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9002 struct bfd_link_info *link_info,
9003 struct elf32_arm_params *params)
9005 struct elf32_arm_link_hash_table *globals;
9007 globals = elf32_arm_hash_table (link_info);
9008 if (globals == NULL)
9011 globals->target1_is_rel = params->target1_is_rel;
9012 if (globals->fdpic_p)
9013 globals->target2_reloc = R_ARM_GOT32;
9014 else if (strcmp (params->target2_type, "rel") == 0)
9015 globals->target2_reloc = R_ARM_REL32;
9016 else if (strcmp (params->target2_type, "abs") == 0)
9017 globals->target2_reloc = R_ARM_ABS32;
9018 else if (strcmp (params->target2_type, "got-rel") == 0)
9019 globals->target2_reloc = R_ARM_GOT_PREL;
9022 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9023 params->target2_type);
9025 globals->fix_v4bx = params->fix_v4bx;
9026 globals->use_blx |= params->use_blx;
9027 globals->vfp11_fix = params->vfp11_denorm_fix;
9028 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9029 if (globals->fdpic_p)
9030 globals->pic_veneer = 1;
9032 globals->pic_veneer = params->pic_veneer;
9033 globals->fix_cortex_a8 = params->fix_cortex_a8;
9034 globals->fix_arm1176 = params->fix_arm1176;
9035 globals->cmse_implib = params->cmse_implib;
9036 globals->in_implib_bfd = params->in_implib_bfd;
9038 BFD_ASSERT (is_arm_elf (output_bfd));
9039 elf_arm_tdata (output_bfd)->no_enum_size_warning
9040 = params->no_enum_size_warning;
9041 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9042 = params->no_wchar_size_warning;
9045 /* Replace the target offset of a Thumb bl or b.w instruction. */
9048 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9054 BFD_ASSERT ((offset & 1) == 0);
9056 upper = bfd_get_16 (abfd, insn);
9057 lower = bfd_get_16 (abfd, insn + 2);
9058 reloc_sign = (offset < 0) ? 1 : 0;
9059 upper = (upper & ~(bfd_vma) 0x7ff)
9060 | ((offset >> 12) & 0x3ff)
9061 | (reloc_sign << 10);
9062 lower = (lower & ~(bfd_vma) 0x2fff)
9063 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9064 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9065 | ((offset >> 1) & 0x7ff);
9066 bfd_put_16 (abfd, upper, insn);
9067 bfd_put_16 (abfd, lower, insn + 2);
9070 /* Thumb code calling an ARM function. */

/* Create (on first use) the Thumb->ARM interworking stub for NAME in the
   Thumb glue section and rewrite the original Thumb BL at HIT_DATA so it
   branches to the stub.  Warns once per symbol if the target object was
   not compiled for interworking.
   NOTE(review): the return type/value lines are not visible in this
   extract -- confirm against the full source.  */
9073 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9077 asection * input_section,
9078 bfd_byte * hit_data,
9081 bfd_signed_vma addend,
9083 char **error_message)
9087 long int ret_offset;
9088 struct elf_link_hash_entry * myh;
9089 struct elf32_arm_link_hash_table * globals;
9091 myh = find_thumb_glue (info, name, error_message);
9095 globals = elf32_arm_hash_table (info);
9096 BFD_ASSERT (globals != NULL);
9097 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9099 my_offset = myh->root.u.def.value;
9101 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9102 THUMB2ARM_GLUE_SECTION_NAME);
9104 BFD_ASSERT (s != NULL);
9105 BFD_ASSERT (s->contents != NULL);
9106 BFD_ASSERT (s->output_section != NULL);
/* The low bit of the glue symbol value flags a not-yet-populated stub.  */
9108 if ((my_offset & 0x01) == 0x01)
9111 && sym_sec->owner != NULL
9112 && !INTERWORK_FLAG (sym_sec->owner))
9115 (_("%pB(%s): warning: interworking not enabled;"
9116 " first occurrence: %pB: %s call to %s"),
9117 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9123 myh->root.u.def.value = my_offset;
/* Stub body: bx pc / nop, then an ARM branch to the real target.  */
9125 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9126 s->contents + my_offset);
9128 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9129 s->contents + my_offset + 2);
9132 /* Address of destination of the stub. */
9133 ((bfd_signed_vma) val)
9135 /* Offset from the start of the current section
9136 to the start of the stubs. */
9138 /* Offset of the start of this stub from the start of the stubs. */
9140 /* Address of the start of the current section. */
9141 + s->output_section->vma)
9142 /* The branch instruction is 4 bytes into the stub. */
9144 /* ARM branches work from the pc of the instruction + 8. */
9147 put_arm_insn (globals, output_bfd,
9148 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9149 s->contents + my_offset + 4);
9152 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9154 /* Now go back and fix up the original BL insn to point to here. */
9156 /* Address of where the stub is located. */
9157 (s->output_section->vma + s->output_offset + my_offset)
9158 /* Address of where the BL is located. */
9159 - (input_section->output_section->vma + input_section->output_offset
9161 /* Addend in the relocation. */
9163 /* Biassing for PC-relative addressing. */
9166 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9171 /* Populate an Arm to Thumb stub. Returns the stub symbol. */

/* On first use, write the ARM->Thumb interworking stub body for NAME into
   glue section S at the symbol's recorded offset, choosing one of three
   stub flavours: PC-relative (PIC / relocatable executable / forced PIC
   veneer), BLX-capable, or the plain v4t ldr/bx sequence.  Warns once per
   symbol when the target object lacks interworking support.  */
9173 static struct elf_link_hash_entry *
9174 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9181 char ** error_message)
9184 long int ret_offset;
9185 struct elf_link_hash_entry * myh;
9186 struct elf32_arm_link_hash_table * globals;
9188 myh = find_arm_glue (info, name, error_message);
9192 globals = elf32_arm_hash_table (info);
9193 BFD_ASSERT (globals != NULL);
9194 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9196 my_offset = myh->root.u.def.value;
/* The low bit of the glue symbol value flags a not-yet-populated stub.  */
9198 if ((my_offset & 0x01) == 0x01)
9201 && sym_sec->owner != NULL
9202 && !INTERWORK_FLAG (sym_sec->owner))
9205 (_("%pB(%s): warning: interworking not enabled;"
9206 " first occurrence: %pB: %s call to %s"),
9207 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9211 myh->root.u.def.value = my_offset;
9213 if (bfd_link_pic (info)
9214 || globals->root.is_relocatable_executable
9215 || globals->pic_veneer)
9217 /* For relocatable objects we can't use absolute addresses,
9218 so construct the address from a relative offset. */
9219 /* TODO: If the offset is small it's probably worth
9220 constructing the address with adds. */
9221 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9222 s->contents + my_offset);
9223 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9224 s->contents + my_offset + 4);
9225 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9226 s->contents + my_offset + 8);
9227 /* Adjust the offset by 4 for the position of the add,
9228 and 8 for the pipeline offset. */
9229 ret_offset = (val - (s->output_offset
9230 + s->output_section->vma
9233 bfd_put_32 (output_bfd, ret_offset,
9234 s->contents + my_offset + 12);
9236 else if (globals->use_blx)
/* BLX available: ldr pc-relative target, literal carries the Thumb bit.  */
9238 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9239 s->contents + my_offset);
9241 /* It's a thumb address. Add the low order bit. */
9242 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9243 s->contents + my_offset + 4);
/* Plain v4t: ldr ip, [literal]; bx ip; literal holds the Thumb address.  */
9247 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9248 s->contents + my_offset);
9250 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9251 s->contents + my_offset + 4);
9253 /* It's a thumb address. Add the low order bit. */
9254 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9255 s->contents + my_offset + 8);
9261 BFD_ASSERT (my_offset <= globals->arm_glue_size);
9266 /* Arm code calling a Thumb function. */

/* Ensure an ARM->Thumb stub exists for NAME (via
   elf32_arm_create_thumb_stub) and rewrite the ARM branch at HIT_DATA,
   keeping its condition/opcode byte, so it targets the stub.
   NOTE(review): the return type/value lines are not visible in this
   extract -- confirm against the full source.  */
9269 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9273 asection * input_section,
9274 bfd_byte * hit_data,
9277 bfd_signed_vma addend,
9279 char **error_message)
9281 unsigned long int tmp;
9284 long int ret_offset;
9285 struct elf_link_hash_entry * myh;
9286 struct elf32_arm_link_hash_table * globals;
9288 globals = elf32_arm_hash_table (info);
9289 BFD_ASSERT (globals != NULL);
9290 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9292 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9293 ARM2THUMB_GLUE_SECTION_NAME);
9294 BFD_ASSERT (s != NULL);
9295 BFD_ASSERT (s->contents != NULL);
9296 BFD_ASSERT (s->output_section != NULL);
9298 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9299 sym_sec, val, s, error_message);
9303 my_offset = myh->root.u.def.value;
/* Keep only the top byte (condition + branch opcode) of the old insn.  */
9304 tmp = bfd_get_32 (input_bfd, hit_data);
9305 tmp = tmp & 0xFF000000;
9307 /* Somehow these are both 4 too far, so subtract 8. */
9308 ret_offset = (s->output_offset
9310 + s->output_section->vma
9311 - (input_section->output_offset
9312 + input_section->output_section->vma
/* Merge the new 24-bit word offset into the branch instruction.  */
9316 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9318 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9323 /* Populate Arm stub for an exported Thumb function. */

/* elf_link_hash_traverse callback: for a hash entry H whose export glue
   symbol was previously allocated, compute the exported Thumb function's
   final address and populate its ARM stub via elf32_arm_create_thumb_stub.
   INF is the struct bfd_link_info for the link.  */
9326 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9328 struct bfd_link_info * info = (struct bfd_link_info *) inf;
9330 struct elf_link_hash_entry * myh;
9331 struct elf32_arm_link_hash_entry *eh;
9332 struct elf32_arm_link_hash_table * globals;
9335 char *error_message;
9337 eh = elf32_arm_hash_entry (h);
9338 /* Allocate stubs for exported Thumb functions on v4t. */
9339 if (eh->export_glue == NULL)
9342 globals = elf32_arm_hash_table (info);
9343 BFD_ASSERT (globals != NULL);
9344 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9346 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9347 ARM2THUMB_GLUE_SECTION_NAME);
9348 BFD_ASSERT (s != NULL);
9349 BFD_ASSERT (s->contents != NULL);
9350 BFD_ASSERT (s->output_section != NULL);
9352 sec = eh->export_glue->root.u.def.section;
9354 BFD_ASSERT (sec->output_section != NULL);
/* Final output address of the exported Thumb function.  */
9356 val = eh->export_glue->root.u.def.value + sec->output_offset
9357 + sec->output_section->vma;
9359 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9360 h->root.u.def.section->owner,
9361 globals->obfd, sec, val, s,
9367 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9370 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9375 struct elf32_arm_link_hash_table *globals;
9377 globals = elf32_arm_hash_table (info);
9378 BFD_ASSERT (globals != NULL);
9379 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9381 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9382 ARM_BX_GLUE_SECTION_NAME);
9383 BFD_ASSERT (s != NULL);
9384 BFD_ASSERT (s->contents != NULL);
9385 BFD_ASSERT (s->output_section != NULL);
9387 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9389 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9391 if ((globals->bx_glue_offset[reg] & 1) == 0)
9393 p = s->contents + glue_addr;
9394 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9395 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9396 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9397 globals->bx_glue_offset[reg] |= 1;
9400 return glue_addr + s->output_section->vma + s->output_offset;
9403 /* Generate Arm stubs for exported Thumb symbols. */
9405 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9406 struct bfd_link_info *link_info)
9408 struct elf32_arm_link_hash_table * globals;
9410 if (link_info == NULL)
9411 /* Ignore this if we are not called by the ELF backend linker. */
9414 globals = elf32_arm_hash_table (link_info);
9415 if (globals == NULL)
9418 /* If blx is available then exported Thumb symbols are OK and there is
9420 if (globals->use_blx)
9423 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9427 /* Reserve space for COUNT dynamic relocations in relocation selection
9431 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9432 bfd_size_type count)
9434 struct elf32_arm_link_hash_table *htab;
9436 htab = elf32_arm_hash_table (info);
9437 BFD_ASSERT (htab->root.dynamic_sections_created);
9440 sreloc->size += RELOC_SIZE (htab) * count;
9443 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9444 dynamic, the relocations should go in SRELOC, otherwise they should
9445 go in the special .rel.iplt section. */
9448 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9449 bfd_size_type count)
9451 struct elf32_arm_link_hash_table *htab;
9453 htab = elf32_arm_hash_table (info);
9454 if (!htab->root.dynamic_sections_created)
9455 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9458 BFD_ASSERT (sreloc != NULL);
9459 sreloc->size += RELOC_SIZE (htab) * count;
9463 /* Add relocation REL to the end of relocation section SRELOC. */
/* Swaps REL out into the next free slot of SRELOC (redirected to
   .rel.iplt for IRELATIVE relocs in a static link, mirroring
   elf32_arm_allocate_irelocs) and bumps reloc_count.  The check below
   guards against writing past the space reserved during sizing.
   NOTE(review): listing is elided; the declaration of `loc` and the
   overflow-handling line after the check are not shown.  */
9466 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9467 asection *sreloc, Elf_Internal_Rela *rel)
9470 struct elf32_arm_link_hash_table *htab;
9472 htab = elf32_arm_hash_table (info);
9473 if (!htab->root.dynamic_sections_created
9474 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9475 sreloc = htab->root.irelplt;
9478 loc = sreloc->contents;
9479 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
/* Must not exceed the size reserved in the allocation pass.  */
9480 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9482 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9485 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9486 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
/* Sizing pass: reserves space in .plt/.iplt, the matching GOT section,
   and the appropriate dynamic-relocation section for one PLT entry.
   NOTE(review): this listing is elided; the if/else structure around
   the is_iplt_entry and fdpic branches is only partially visible.  */
9490 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9491 bfd_boolean is_iplt_entry,
9492 union gotplt_union *root_plt,
9493 struct arm_plt_info *arm_plt)
9495 struct elf32_arm_link_hash_table *htab;
9499 htab = elf32_arm_hash_table (info);
/* ifunc entries use the .iplt/.igot.plt/.rel.iplt triple.  */
9503 splt = htab->root.iplt;
9504 sgotplt = htab->root.igotplt;
9506 /* NaCl uses a special first entry in .iplt too. */
9507 if (htab->root.target_os == is_nacl && splt->size == 0)
9508 splt->size += htab->plt_header_size;
9510 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9511 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
/* Regular entries use .plt/.got.plt.  */
9515 splt = htab->root.splt;
9516 sgotplt = htab->root.sgotplt;
9520 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9521 /* For lazy binding, relocations will be put into .rel.plt, in
9522 .rel.got otherwise. */
9523 /* FIXME: today we don't support lazy binding so put it in .rel.got */
9524 if (info->flags & DF_BIND_NOW)
9525 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
9527 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9531 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
9532 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9535 /* If this is the first .plt entry, make room for the special
9537 if (splt->size == 0)
9538 splt->size += htab->plt_header_size;
9540 htab->next_tls_desc_index++;
9543 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9544 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9545 splt->size += PLT_THUMB_STUB_SIZE;
/* Record the entry's offset before growing the section past it.  */
9546 root_plt->offset = splt->size;
9547 splt->size += htab->plt_entry_size;
9549 /* We also need to make an entry in the .got.plt section, which
9550 will be placed in the .got section by the linker script. */
9552 arm_plt->got_offset = sgotplt->size;
/* TLS descriptors occupy 8 bytes each ahead of us in .got.plt.  */
9554 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9556 /* Function descriptor takes 64 bits in GOT. */
9563 arm_movw_immediate (bfd_vma value)
9565 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9569 arm_movt_immediate (bfd_vma value)
9571 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9574 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9575 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9576 Otherwise, DYNINDX is the index of the symbol in the dynamic
9577 symbol table and SYM_VALUE is undefined.
9579 ROOT_PLT points to the offset of the PLT entry from the start of its
9580 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9581 bookkeeping information.
9583 Returns FALSE if there was a problem. */
/* NOTE(review): this listing is elided (numbered lines skip); braces,
   several declarations and some trailing call arguments between the
   visible lines are not shown.  Per-target branches below emit the
   actual PLT instruction words and the matching dynamic reloc.  */
9586 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9587 union gotplt_union *root_plt,
9588 struct arm_plt_info *arm_plt,
9589 int dynindx, bfd_vma sym_value)
9591 struct elf32_arm_link_hash_table *htab;
9597 Elf_Internal_Rela rel;
9598 bfd_vma got_header_size;
9600 htab = elf32_arm_hash_table (info);
9602 /* Pick the appropriate sections and sizes. */
9605 splt = htab->root.iplt;
9606 sgot = htab->root.igotplt;
9607 srel = htab->root.irelplt;
9609 /* There are no reserved entries in .igot.plt, and no special
9610 first entry in .iplt. */
9611 got_header_size = 0;
9615 splt = htab->root.splt;
9616 sgot = htab->root.sgotplt;
9617 srel = htab->root.srelplt;
9619 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9621 BFD_ASSERT (splt != NULL && srel != NULL);
9623 bfd_vma got_offset, got_address, plt_address;
9624 bfd_vma got_displacement, initial_got_entry;
9627 BFD_ASSERT (sgot != NULL);
9629 /* Get the offset into the .(i)got.plt table of the entry that
9630 corresponds to this function. */
/* Mask off the low "populated" marker bit kept in got_offset.  */
9631 got_offset = (arm_plt->got_offset & -2);
9633 /* Get the index in the procedure linkage table which
9634 corresponds to this symbol. This is the index of this symbol
9635 in all the symbols for which we are making plt entries.
9636 After the reserved .got.plt entries, all symbols appear in
9637 the same order as in .plt. */
9639 /* Function descriptor takes 8 bytes. */
9640 plt_index = (got_offset - got_header_size) / 8;
9642 plt_index = (got_offset - got_header_size) / 4;
9644 /* Calculate the address of the GOT entry. */
9645 got_address = (sgot->output_section->vma
9646 + sgot->output_offset
9649 /* ...and the address of the PLT entry. */
9650 plt_address = (splt->output_section->vma
9651 + splt->output_offset
9652 + root_plt->offset);
/* ptr is the write cursor for this entry's instruction words.  */
9654 ptr = splt->contents + root_plt->offset;
/* VxWorks shared: patch GOT offset and .rel.plt byte index into the
   template; slots 2 and 5 are data words, not instructions.  */
9655 if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
9660 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9662 val = elf32_arm_vxworks_shared_plt_entry[i];
9664 val |= got_address - sgot->output_section->vma;
9666 val |= plt_index * RELOC_SIZE (htab);
9667 if (i == 2 || i == 5)
9668 bfd_put_32 (output_bfd, val, ptr);
9670 put_arm_insn (htab, output_bfd, val, ptr);
/* VxWorks executable: also emits two .rela.plt.unloaded ABS32 relocs
   so the loader can relocate the absolute words.  */
9673 else if (htab->root.target_os == is_vxworks)
9678 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9680 val = elf32_arm_vxworks_exec_plt_entry[i];
9684 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9686 val |= plt_index * RELOC_SIZE (htab);
9687 if (i == 2 || i == 5)
9688 bfd_put_32 (output_bfd, val, ptr);
9690 put_arm_insn (htab, output_bfd, val, ptr);
9693 loc = (htab->srelplt2->contents
9694 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9696 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9697 referencing the GOT for this PLT entry. */
9698 rel.r_offset = plt_address + 8;
9699 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9700 rel.r_addend = got_offset;
9701 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9702 loc += RELOC_SIZE (htab);
9704 /* Create the R_ARM_ABS32 relocation referencing the
9705 beginning of the PLT for this GOT entry. */
9706 rel.r_offset = got_address;
9707 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9709 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
/* NaCl: MOVW/MOVT the GOT displacement, then branch to the common
   tail in the initial PLT slot.  */
9711 else if (htab->root.target_os == is_nacl)
9713 /* Calculate the displacement between the PLT slot and the
9714 common tail that's part of the special initial PLT slot. */
9715 int32_t tail_displacement
9716 = ((splt->output_section->vma + splt->output_offset
9717 + ARM_NACL_PLT_TAIL_OFFSET)
9718 - (plt_address + htab->plt_entry_size + 4));
9719 BFD_ASSERT ((tail_displacement & 3) == 0);
9720 tail_displacement >>= 2;
/* Must fit in a 24-bit branch displacement.  */
9722 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9723 || (-tail_displacement & 0xff000000) == 0);
9725 /* Calculate the displacement between the PLT slot and the entry
9726 in the GOT. The offset accounts for the value produced by
9727 adding to pc in the penultimate instruction of the PLT stub. */
9728 got_displacement = (got_address
9729 - (plt_address + htab->plt_entry_size));
9731 /* NaCl does not support interworking at all. */
9732 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9734 put_arm_insn (htab, output_bfd,
9735 elf32_arm_nacl_plt_entry[0]
9736 | arm_movw_immediate (got_displacement),
9738 put_arm_insn (htab, output_bfd,
9739 elf32_arm_nacl_plt_entry[1]
9740 | arm_movt_immediate (got_displacement),
9742 put_arm_insn (htab, output_bfd,
9743 elf32_arm_nacl_plt_entry[2],
9745 put_arm_insn (htab, output_bfd,
9746 elf32_arm_nacl_plt_entry[3]
9747 | (tail_displacement & 0x00ffffff),
/* FDPIC: template entry plus inline GOT offset word(s); the lazy
   half of the template is only emitted without DF_BIND_NOW.  */
9750 else if (htab->fdpic_p)
9752 const bfd_vma *plt_entry = using_thumb_only(htab)
9753 ? elf32_arm_fdpic_thumb_plt_entry
9754 : elf32_arm_fdpic_plt_entry;
9756 /* Fill-up Thumb stub if needed. */
9757 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9759 put_thumb_insn (htab, output_bfd,
9760 elf32_arm_plt_thumb_stub[0], ptr - 4);
9761 put_thumb_insn (htab, output_bfd,
9762 elf32_arm_plt_thumb_stub[1], ptr - 2);
9764 /* As we are using 32 bit instructions even for the Thumb
9765 version, we have to use 'put_arm_insn' instead of
9766 'put_thumb_insn'. */
9767 put_arm_insn(htab, output_bfd, plt_entry[0], ptr + 0);
9768 put_arm_insn(htab, output_bfd, plt_entry[1], ptr + 4);
9769 put_arm_insn(htab, output_bfd, plt_entry[2], ptr + 8);
9770 put_arm_insn(htab, output_bfd, plt_entry[3], ptr + 12);
9771 bfd_put_32 (output_bfd, got_offset, ptr + 16);
9773 if (!(info->flags & DF_BIND_NOW))
9775 /* funcdesc_value_reloc_offset. */
9776 bfd_put_32 (output_bfd,
9777 htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9779 put_arm_insn(htab, output_bfd, plt_entry[6], ptr + 24);
9780 put_arm_insn(htab, output_bfd, plt_entry[7], ptr + 28);
9781 put_arm_insn(htab, output_bfd, plt_entry[8], ptr + 32);
9782 put_arm_insn(htab, output_bfd, plt_entry[9], ptr + 36);
/* Thumb-only (v7-M etc.): Thumb2 MOVW/MOVT-style entry; Thumb-1
   targets are diagnosed as unsupported.  */
9785 else if (using_thumb_only (htab))
9787 /* PR ld/16017: Generate thumb only PLT entries. */
9788 if (!using_thumb2 (htab))
9790 /* FIXME: We ought to be able to generate thumb-1 PLT
9792 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9797 /* Calculate the displacement between the PLT slot and the entry in
9798 the GOT. The 12-byte offset accounts for the value produced by
9799 adding to pc in the 3rd instruction of the PLT stub. */
9800 got_displacement = got_address - (plt_address + 12);
9802 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9803 instead of 'put_thumb_insn'. */
9804 put_arm_insn (htab, output_bfd,
9805 elf32_thumb2_plt_entry[0]
9806 | ((got_displacement & 0x000000ff) << 16)
9807 | ((got_displacement & 0x00000700) << 20)
9808 | ((got_displacement & 0x00000800) >> 1)
9809 | ((got_displacement & 0x0000f000) >> 12),
9811 put_arm_insn (htab, output_bfd,
9812 elf32_thumb2_plt_entry[1]
9813 | ((got_displacement & 0x00ff0000) )
9814 | ((got_displacement & 0x07000000) << 4)
9815 | ((got_displacement & 0x08000000) >> 17)
9816 | ((got_displacement & 0xf0000000) >> 28),
9818 put_arm_insn (htab, output_bfd,
9819 elf32_thumb2_plt_entry[2],
9821 put_arm_insn (htab, output_bfd,
9822 elf32_thumb2_plt_entry[3],
/* Default ARM entry (short 3-insn or long 4-insn form), optionally
   preceded by a Thumb interworking stub.  */
9827 /* Calculate the displacement between the PLT slot and the
9828 entry in the GOT. The eight-byte offset accounts for the
9829 value produced by adding to pc in the first instruction
9831 got_displacement = got_address - (plt_address + 8);
9833 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9835 put_thumb_insn (htab, output_bfd,
9836 elf32_arm_plt_thumb_stub[0], ptr - 4);
9837 put_thumb_insn (htab, output_bfd,
9838 elf32_arm_plt_thumb_stub[1], ptr - 2);
9841 if (!elf32_arm_use_long_plt_entry)
/* Short form encodes the displacement in 8+8+12 bits, so the top
   nibble must be zero.  */
9843 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9845 put_arm_insn (htab, output_bfd,
9846 elf32_arm_plt_entry_short[0]
9847 | ((got_displacement & 0x0ff00000) >> 20),
9849 put_arm_insn (htab, output_bfd,
9850 elf32_arm_plt_entry_short[1]
9851 | ((got_displacement & 0x000ff000) >> 12),
9853 put_arm_insn (htab, output_bfd,
9854 elf32_arm_plt_entry_short[2]
9855 | (got_displacement & 0x00000fff),
9857 #ifdef FOUR_WORD_PLT
9858 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9863 put_arm_insn (htab, output_bfd,
9864 elf32_arm_plt_entry_long[0]
9865 | ((got_displacement & 0xf0000000) >> 28),
9867 put_arm_insn (htab, output_bfd,
9868 elf32_arm_plt_entry_long[1]
9869 | ((got_displacement & 0x0ff00000) >> 20),
9871 put_arm_insn (htab, output_bfd,
9872 elf32_arm_plt_entry_long[2]
9873 | ((got_displacement & 0x000ff000) >> 12),
9875 put_arm_insn (htab, output_bfd,
9876 elf32_arm_plt_entry_long[3]
9877 | (got_displacement & 0x00000fff),
9882 /* Fill in the entry in the .rel(a).(i)plt section. */
9883 rel.r_offset = got_address;
9887 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9888 The dynamic linker or static executable then calls SYM_VALUE
9889 to determine the correct run-time value of the .igot.plt entry. */
9890 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9891 initial_got_entry = sym_value;
9895 /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9896 used by PLT entry. */
9899 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9900 initial_got_entry = 0;
/* Regular case: JUMP_SLOT reloc; GOT slot initially points at the
   start of .plt (the lazy-resolution stub).  */
9904 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9905 initial_got_entry = (splt->output_section->vma
9906 + splt->output_offset);
9909 When thumb only we need to set the LSB for any address that
9910 will be used with an interworking branch instruction. */
9911 if (using_thumb_only (htab))
9912 initial_got_entry |= 1;
9916 /* Fill in the entry in the global offset table. */
9917 bfd_put_32 (output_bfd, initial_got_entry,
9918 sgot->contents + got_offset);
9920 if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
9922 /* Setup initial funcdesc value. */
9923 /* FIXME: we don't support lazy binding because there is a
9924 race condition between both words getting written and
9925 some other thread attempting to read them. The ARM
9926 architecture does not have an atomic 64 bit load/store
9927 instruction that could be used to prevent it; it is
9928 recommended that threaded FDPIC applications run with the
9929 LD_BIND_NOW environment variable set. */
9930 bfd_put_32(output_bfd, plt_address + 0x18,
9931 sgot->contents + got_offset);
9932 bfd_put_32(output_bfd, -1 /*TODO*/,
9933 sgot->contents + got_offset + 4);
9937 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
9942 /* For FDPIC we put PLT relocationss into .rel.got when not
9943 lazy binding otherwise we put them in .rel.plt. For now,
9944 we don't support lazy binding so put it in .rel.got. */
9945 if (info->flags & DF_BIND_NOW)
9946 elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
9948 elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
/* Non-append path: write the reloc at its fixed plt_index slot.  */
9952 loc = srel->contents + plt_index * RELOC_SIZE (htab);
9953 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9960 /* Some relocations map to different relocations depending on the
9961 target. Return the real relocation. */
/* Maps the polymorphic R_ARM_TARGET1/R_ARM_TARGET2 reloc types to the
   concrete type chosen for this target: TARGET1 depends on
   globals->target1_is_rel (presumably REL32 vs ABS32 — the elided
   switch is not visible here), TARGET2 is globals->target2_reloc.
   Other types pass through unchanged.  */
9964 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9970 if (globals->target1_is_rel)
9976 return globals->target2_reloc;
9983 /* Return the base VMA address which should be subtracted from real addresses
9984 when resolving @dtpoff relocation.
9985 This is PT_TLS segment p_vaddr. */
9988 dtpoff_base (struct bfd_link_info *info)
9990 /* If tls_sec is NULL, we should have signalled an error already. */
/* NOTE(review): the fallback return for the NULL case is elided from
   this listing — presumably returns 0; confirm against the full file.  */
9991 if (elf_hash_table (info)->tls_sec == NULL)
9993 return elf_hash_table (info)->tls_sec->vma;
9996 /* Return the relocation value for @tpoff relocation
9997 if STT_TLS virtual address is ADDRESS. */
10000 tpoff (struct bfd_link_info *info, bfd_vma address)
10002 struct elf_link_hash_table *htab = elf_hash_table (info);
10005 /* If tls_sec is NULL, we should have signalled an error already. */
10006 if (htab->tls_sec == NULL)
/* TLS block follows the TCB, aligned to the TLS section's alignment;
   the offset is relative to the thread pointer.  */
10008 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10009 return address - htab->tls_sec->vma + base;
10012 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10013 VALUE is the relocation value. */
/* Patches the low 12 bits of the 32-bit word at DATA with VALUE,
   preserving the upper 20 bits.  The overflow guard's condition line
   (presumably value > 0xfff) is elided from this listing.  */
10015 static bfd_reloc_status_type
10016 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10019 return bfd_reloc_overflow;
10021 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10022 bfd_put_32 (abfd, value, data);
10023 return bfd_reloc_ok;
10026 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10027 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10028 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10030 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10031 is to then call final_link_relocate. Return other values in the
10034 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
10035 the pre-relaxed code. It would be nice if the relocs were updated
10036 to match the optimization. */
/* NOTE(review): listing is elided; case bodies below are missing their
   enclosing braces and some surrounding statements.  Each case rewrites
   the TLS-descriptor/call instruction sequence in place in CONTENTS.  */
10038 static bfd_reloc_status_type
10039 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10040 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10041 Elf_Internal_Rela *rel, unsigned long is_local)
10043 unsigned long insn;
10045 switch (ELF32_R_TYPE (rel->r_info))
10048 return bfd_reloc_notsupported;
/* Adjust the GOT-slot-address computation's embedded PC bias
   (Thumb PC reads as +4 with bit 0, ARM as +8).  */
10050 case R_ARM_TLS_GOTDESC:
10055 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10057 insn -= 5; /* THUMB */
10059 insn -= 8; /* ARM */
10061 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10062 return bfd_reloc_continue;
/* Thumb descriptor sequence: neutralize each recognized insn of the
   add/ldr/blx trampoline (0x46c0 is the Thumb `nop` encoding).  */
10064 case R_ARM_THM_TLS_DESCSEQ:
10066 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10067 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
10071 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10073 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10077 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10080 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10082 else if ((insn & 0xff87) == 0x4780) /* blx rx */
10086 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10089 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10090 contents + rel->r_offset);
/* Anything else in the trampoline is unexpected: report it.  */
10094 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10095 /* It's a 32 bit instruction, fetch the rest of it for
10096 error generation. */
10097 insn = (insn << 16)
10098 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10100 /* xgettext:c-format */
10101 (_("%pB(%pA+%#" PRIx64 "): "
10102 "unexpected %s instruction '%#lx' in TLS trampoline"),
10103 input_bfd, input_sec, (uint64_t) rel->r_offset,
10105 return bfd_reloc_notsupported;
/* ARM descriptor sequence: same idea with ARM encodings
   (0xe1a00000 is `mov r0, r0`, the ARM nop idiom).  */
10109 case R_ARM_TLS_DESCSEQ:
10111 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10112 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10116 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10117 contents + rel->r_offset);
10119 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10123 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10126 bfd_put_32 (input_bfd, insn & 0xfffff000,
10127 contents + rel->r_offset);
10129 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10133 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10136 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10137 contents + rel->r_offset);
10142 /* xgettext:c-format */
10143 (_("%pB(%pA+%#" PRIx64 "): "
10144 "unexpected %s instruction '%#lx' in TLS trampoline"),
10145 input_bfd, input_sec, (uint64_t) rel->r_offset,
10147 return bfd_reloc_notsupported;
10151 case R_ARM_TLS_CALL:
10152 /* GD->IE relaxation, turn the instruction into 'nop' or
10153 'ldr r0, [pc,r0]' */
10154 insn = is_local ? 0xe1a00000 : 0xe79f0000;
10155 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10158 case R_ARM_THM_TLS_CALL:
10159 /* GD->IE relaxation. */
10161 /* add r0,pc; ldr r0, [r0] */
10163 else if (using_thumb2 (globals))
/* Write the chosen 32-bit replacement as two Thumb halfwords.  */
10170 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10171 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10174 return bfd_reloc_ok;
10177 /* For a given value of n, calculate the value of G_n as required to
10178 deal with group relocations. We return it in the form of an
10179 encoded constant-and-rotation, together with the final residual. If n is
10180 specified as less than zero, then final_residual is filled with the
10181 input value and no further action is performed. */
/* Implements AAELF group-relocation arithmetic: each iteration peels
   off the next 8-bit chunk (aligned to an even bit position, as ARM
   immediate rotations require) and the loop leaves G_n encoded as
   imm8 | (rotation/2 << 8).
   NOTE(review): listing is elided; loop braces and several statements
   (msb init, shift computation, residual update) are not shown.  */
10184 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10188 bfd_vma encoded_g_n = 0;
10189 bfd_vma residual = value; /* Also known as Y_n. */
10191 for (current_n = 0; current_n <= n; current_n++)
10195 /* Calculate which part of the value to mask. */
10202 /* Determine the most significant bit in the residual and
10203 align the resulting value to a 2-bit boundary. */
10204 for (msb = 30; msb >= 0; msb -= 2)
10205 if (residual & (3u << msb))
10208 /* The desired shift is now (msb - 6), or zero, whichever
10215 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10216 g_n = residual & (0xff << shift);
10217 encoded_g_n = (g_n >> shift)
10218 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10220 /* Calculate the residual for the next time around. */
10224 *final_residual = residual;
10226 return encoded_g_n;
10229 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10230 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
/* Inspects the data-processing opcode field (bits 21-24) of INSN;
   the return statements themselves are elided from this listing.  */
10233 identify_add_or_sub (bfd_vma insn)
10235 int opcode = insn & 0x1e00000;
10237 if (opcode == 1 << 23) /* ADD */
10240 if (opcode == 1 << 22) /* SUB */
10240 if (opcode == 1 << 22) /* SUB */
10246 /* Perform a relocation as part of a final link. */
10248 static bfd_reloc_status_type
10249 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10252 asection * input_section,
10253 bfd_byte * contents,
10254 Elf_Internal_Rela * rel,
10256 struct bfd_link_info * info,
10257 asection * sym_sec,
10258 const char * sym_name,
10259 unsigned char st_type,
10260 enum arm_st_branch_type branch_type,
10261 struct elf_link_hash_entry * h,
10262 bfd_boolean * unresolved_reloc_p,
10263 char ** error_message)
10265 unsigned long r_type = howto->type;
10266 unsigned long r_symndx;
10267 bfd_byte * hit_data = contents + rel->r_offset;
10268 bfd_vma * local_got_offsets;
10269 bfd_vma * local_tlsdesc_gotents;
10272 asection * sreloc = NULL;
10273 asection * srelgot;
10275 bfd_signed_vma signed_addend;
10276 unsigned char dynreloc_st_type;
10277 bfd_vma dynreloc_value;
10278 struct elf32_arm_link_hash_table * globals;
10279 struct elf32_arm_link_hash_entry *eh;
10280 union gotplt_union *root_plt;
10281 struct arm_plt_info *arm_plt;
10282 bfd_vma plt_offset;
10283 bfd_vma gotplt_offset;
10284 bfd_boolean has_iplt_entry;
10285 bfd_boolean resolved_to_zero;
10287 globals = elf32_arm_hash_table (info);
10288 if (globals == NULL)
10289 return bfd_reloc_notsupported;
10291 BFD_ASSERT (is_arm_elf (input_bfd));
10292 BFD_ASSERT (howto != NULL);
10294 /* Some relocation types map to different relocations depending on the
10295 target. We pick the right one here. */
10296 r_type = arm_real_reloc_type (globals, r_type);
10298 /* It is possible to have linker relaxations on some TLS access
10299 models. Update our information here. */
10300 r_type = elf32_arm_tls_transition (info, r_type, h);
10302 if (r_type != howto->type)
10303 howto = elf32_arm_howto_from_type (r_type);
10305 eh = (struct elf32_arm_link_hash_entry *) h;
10306 sgot = globals->root.sgot;
10307 local_got_offsets = elf_local_got_offsets (input_bfd);
10308 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10310 if (globals->root.dynamic_sections_created)
10311 srelgot = globals->root.srelgot;
10315 r_symndx = ELF32_R_SYM (rel->r_info);
10317 if (globals->use_rel)
10321 switch (howto->size)
10323 case 0: addend = bfd_get_8 (input_bfd, hit_data); break;
10324 case 1: addend = bfd_get_16 (input_bfd, hit_data); break;
10325 case 2: addend = bfd_get_32 (input_bfd, hit_data); break;
10326 default: addend = 0; break;
10328 /* Note: the addend and signed_addend calculated here are
10329 incorrect for any split field. */
10330 addend &= howto->src_mask;
10331 sign = howto->src_mask & ~(howto->src_mask >> 1);
10332 signed_addend = (addend ^ sign) - sign;
10333 signed_addend = (bfd_vma) signed_addend << howto->rightshift;
10334 addend <<= howto->rightshift;
10337 addend = signed_addend = rel->r_addend;
10339 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10340 are resolving a function call relocation. */
10341 if (using_thumb_only (globals)
10342 && (r_type == R_ARM_THM_CALL
10343 || r_type == R_ARM_THM_JUMP24)
10344 && branch_type == ST_BRANCH_TO_ARM)
10345 branch_type = ST_BRANCH_TO_THUMB;
10347 /* Record the symbol information that should be used in dynamic
10349 dynreloc_st_type = st_type;
10350 dynreloc_value = value;
10351 if (branch_type == ST_BRANCH_TO_THUMB)
10352 dynreloc_value |= 1;
10354 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10355 VALUE appropriately for relocations that we resolve at link time. */
10356 has_iplt_entry = FALSE;
10357 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10359 && root_plt->offset != (bfd_vma) -1)
10361 plt_offset = root_plt->offset;
10362 gotplt_offset = arm_plt->got_offset;
10364 if (h == NULL || eh->is_iplt)
10366 has_iplt_entry = TRUE;
10367 splt = globals->root.iplt;
10369 /* Populate .iplt entries here, because not all of them will
10370 be seen by finish_dynamic_symbol. The lower bit is set if
10371 we have already populated the entry. */
10372 if (plt_offset & 1)
10376 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10377 -1, dynreloc_value))
10378 root_plt->offset |= 1;
10380 return bfd_reloc_notsupported;
10383 /* Static relocations always resolve to the .iplt entry. */
10384 st_type = STT_FUNC;
10385 value = (splt->output_section->vma
10386 + splt->output_offset
10388 branch_type = ST_BRANCH_TO_ARM;
10390 /* If there are non-call relocations that resolve to the .iplt
10391 entry, then all dynamic ones must too. */
10392 if (arm_plt->noncall_refcount != 0)
10394 dynreloc_st_type = st_type;
10395 dynreloc_value = value;
10399 /* We populate the .plt entry in finish_dynamic_symbol. */
10400 splt = globals->root.splt;
10405 plt_offset = (bfd_vma) -1;
10406 gotplt_offset = (bfd_vma) -1;
10409 resolved_to_zero = (h != NULL
10410 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10415 /* We don't need to find a value for this symbol. It's just a
10417 *unresolved_reloc_p = FALSE;
10418 return bfd_reloc_ok;
10421 if (globals->root.target_os != is_vxworks)
10422 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10423 /* Fall through. */
10427 case R_ARM_ABS32_NOI:
10429 case R_ARM_REL32_NOI:
10435 /* Handle relocations which should use the PLT entry. ABS32/REL32
10436 will use the symbol's value, which may point to a PLT entry, but we
10437 don't need to handle that here. If we created a PLT entry, all
10438 branches in this object should go to it, except if the PLT is too
10439 far away, in which case a long branch stub should be inserted. */
10440 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10441 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10442 && r_type != R_ARM_CALL
10443 && r_type != R_ARM_JUMP24
10444 && r_type != R_ARM_PLT32)
10445 && plt_offset != (bfd_vma) -1)
10447 /* If we've created a .plt section, and assigned a PLT entry
10448 to this function, it must either be a STT_GNU_IFUNC reference
10449 or not be known to bind locally. In other cases, we should
10450 have cleared the PLT entry by now. */
10451 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10453 value = (splt->output_section->vma
10454 + splt->output_offset
10456 *unresolved_reloc_p = FALSE;
10457 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10458 contents, rel->r_offset, value,
10462 /* When generating a shared object or relocatable executable, these
10463 relocations are copied into the output file to be resolved at
10465 if ((bfd_link_pic (info)
10466 || globals->root.is_relocatable_executable
10467 || globals->fdpic_p)
10468 && (input_section->flags & SEC_ALLOC)
10469 && !(globals->root.target_os == is_vxworks
10470 && strcmp (input_section->output_section->name,
10472 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10473 || !SYMBOL_CALLS_LOCAL (info, h))
10474 && !(input_bfd == globals->stub_bfd
10475 && strstr (input_section->name, STUB_SUFFIX))
10477 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10478 && !resolved_to_zero)
10479 || h->root.type != bfd_link_hash_undefweak)
10480 && r_type != R_ARM_PC24
10481 && r_type != R_ARM_CALL
10482 && r_type != R_ARM_JUMP24
10483 && r_type != R_ARM_PREL31
10484 && r_type != R_ARM_PLT32)
10486 Elf_Internal_Rela outrel;
10487 bfd_boolean skip, relocate;
10490 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10491 && !h->def_regular)
10493 char *v = _("shared object");
10495 if (bfd_link_executable (info))
10496 v = _("PIE executable");
10499 (_("%pB: relocation %s against external or undefined symbol `%s'"
10500 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10501 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10502 return bfd_reloc_notsupported;
10505 *unresolved_reloc_p = FALSE;
10507 if (sreloc == NULL && globals->root.dynamic_sections_created)
10509 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10510 ! globals->use_rel);
10512 if (sreloc == NULL)
10513 return bfd_reloc_notsupported;
10519 outrel.r_addend = addend;
10521 _bfd_elf_section_offset (output_bfd, info, input_section,
10523 if (outrel.r_offset == (bfd_vma) -1)
10525 else if (outrel.r_offset == (bfd_vma) -2)
10526 skip = TRUE, relocate = TRUE;
10527 outrel.r_offset += (input_section->output_section->vma
10528 + input_section->output_offset);
10531 memset (&outrel, 0, sizeof outrel);
10533 && h->dynindx != -1
10534 && (!bfd_link_pic (info)
10535 || !(bfd_link_pie (info)
10536 || SYMBOLIC_BIND (info, h))
10537 || !h->def_regular))
10538 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10543 /* This symbol is local, or marked to become local. */
10544 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10545 || (globals->fdpic_p && !bfd_link_pic(info)));
10546 /* On SVR4-ish systems, the dynamic loader cannot
10547 relocate the text and data segments independently,
10548 so the symbol does not matter. */
10550 if (dynreloc_st_type == STT_GNU_IFUNC)
10551 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10552 to the .iplt entry. Instead, every non-call reference
10553 must use an R_ARM_IRELATIVE relocation to obtain the
10554 correct run-time address. */
10555 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10556 else if (globals->fdpic_p && !bfd_link_pic(info))
10559 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10560 if (globals->use_rel)
10563 outrel.r_addend += dynreloc_value;
10567 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
10569 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10571 /* If this reloc is against an external symbol, we do not want to
10572 fiddle with the addend. Otherwise, we need to include the symbol
10573 value so that it becomes an addend for the dynamic reloc. */
10575 return bfd_reloc_ok;
10577 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10578 contents, rel->r_offset,
10579 dynreloc_value, (bfd_vma) 0);
10581 else switch (r_type)
10584 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10586 case R_ARM_XPC25: /* Arm BLX instruction. */
10589 case R_ARM_PC24: /* Arm B/BL instruction. */
10592 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10594 if (r_type == R_ARM_XPC25)
10596 /* Check for Arm calling Arm function. */
10597 /* FIXME: Should we translate the instruction into a BL
10598 instruction instead ? */
10599 if (branch_type != ST_BRANCH_TO_THUMB)
10601 (_("\%pB: warning: %s BLX instruction targets"
10602 " %s function '%s'"),
10604 "ARM", h ? h->root.root.string : "(local)");
10606 else if (r_type == R_ARM_PC24)
10608 /* Check for Arm calling Thumb function. */
10609 if (branch_type == ST_BRANCH_TO_THUMB)
10611 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10612 output_bfd, input_section,
10613 hit_data, sym_sec, rel->r_offset,
10614 signed_addend, value,
10616 return bfd_reloc_ok;
10618 return bfd_reloc_dangerous;
10622 /* Check if a stub has to be inserted because the
10623 destination is too far or we are changing mode. */
10624 if ( r_type == R_ARM_CALL
10625 || r_type == R_ARM_JUMP24
10626 || r_type == R_ARM_PLT32)
10628 enum elf32_arm_stub_type stub_type = arm_stub_none;
10629 struct elf32_arm_link_hash_entry *hash;
10631 hash = (struct elf32_arm_link_hash_entry *) h;
10632 stub_type = arm_type_of_stub (info, input_section, rel,
10633 st_type, &branch_type,
10634 hash, value, sym_sec,
10635 input_bfd, sym_name);
10637 if (stub_type != arm_stub_none)
10639 /* The target is out of reach, so redirect the
10640 branch to the local stub for this function. */
10641 stub_entry = elf32_arm_get_stub_entry (input_section,
10646 if (stub_entry != NULL)
10647 value = (stub_entry->stub_offset
10648 + stub_entry->stub_sec->output_offset
10649 + stub_entry->stub_sec->output_section->vma);
10651 if (plt_offset != (bfd_vma) -1)
10652 *unresolved_reloc_p = FALSE;
10657 /* If the call goes through a PLT entry, make sure to
10658 check distance to the right destination address. */
10659 if (plt_offset != (bfd_vma) -1)
10661 value = (splt->output_section->vma
10662 + splt->output_offset
10664 *unresolved_reloc_p = FALSE;
10665 /* The PLT entry is in ARM mode, regardless of the
10666 target function. */
10667 branch_type = ST_BRANCH_TO_ARM;
10672 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10674 S is the address of the symbol in the relocation.
10675 P is address of the instruction being relocated.
10676 A is the addend (extracted from the instruction) in bytes.
10678 S is held in 'value'.
10679 P is the base address of the section containing the
10680 instruction plus the offset of the reloc into that
10682 (input_section->output_section->vma +
10683 input_section->output_offset +
10685 A is the addend, converted into bytes, ie:
10686 (signed_addend * 4)
10688 Note: None of these operations have knowledge of the pipeline
10689 size of the processor, thus it is up to the assembler to
10690 encode this information into the addend. */
10691 value -= (input_section->output_section->vma
10692 + input_section->output_offset);
10693 value -= rel->r_offset;
10694 value += signed_addend;
10696 signed_addend = value;
10697 signed_addend >>= howto->rightshift;
10699 /* A branch to an undefined weak symbol is turned into a jump to
10700 the next instruction unless a PLT entry will be created.
10701 Do the same for local undefined symbols (but not for STN_UNDEF).
10702 The jump to the next instruction is optimized as a NOP depending
10703 on the architecture. */
10704 if (h ? (h->root.type == bfd_link_hash_undefweak
10705 && plt_offset == (bfd_vma) -1)
10706 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10708 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10710 if (arch_has_arm_nop (globals))
10711 value |= 0x0320f000;
10713 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10717 /* Perform a signed range check. */
10718 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10719 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10720 return bfd_reloc_overflow;
10722 addend = (value & 2);
10724 value = (signed_addend & howto->dst_mask)
10725 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10727 if (r_type == R_ARM_CALL)
10729 /* Set the H bit in the BLX instruction. */
10730 if (branch_type == ST_BRANCH_TO_THUMB)
10733 value |= (1 << 24);
10735 value &= ~(bfd_vma)(1 << 24);
10738 /* Select the correct instruction (BL or BLX). */
10739 /* Only if we are not handling a BL to a stub. In this
10740 case, mode switching is performed by the stub. */
10741 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10742 value |= (1 << 28);
10743 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10745 value &= ~(bfd_vma)(1 << 28);
10746 value |= (1 << 24);
10755 if (branch_type == ST_BRANCH_TO_THUMB)
10759 case R_ARM_ABS32_NOI:
10765 if (branch_type == ST_BRANCH_TO_THUMB)
10767 value -= (input_section->output_section->vma
10768 + input_section->output_offset + rel->r_offset);
10771 case R_ARM_REL32_NOI:
10773 value -= (input_section->output_section->vma
10774 + input_section->output_offset + rel->r_offset);
10778 value -= (input_section->output_section->vma
10779 + input_section->output_offset + rel->r_offset);
10780 value += signed_addend;
10781 if (! h || h->root.type != bfd_link_hash_undefweak)
10783 /* Check for overflow. */
10784 if ((value ^ (value >> 1)) & (1 << 30))
10785 return bfd_reloc_overflow;
10787 value &= 0x7fffffff;
10788 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10789 if (branch_type == ST_BRANCH_TO_THUMB)
10794 bfd_put_32 (input_bfd, value, hit_data);
10795 return bfd_reloc_ok;
10800 /* There is no way to tell whether the user intended to use a signed or
10801 unsigned addend. When checking for overflow we accept either,
10802 as specified by the AAELF. */
10803 if ((long) value > 0xff || (long) value < -0x80)
10804 return bfd_reloc_overflow;
10806 bfd_put_8 (input_bfd, value, hit_data);
10807 return bfd_reloc_ok;
10812 /* See comment for R_ARM_ABS8. */
10813 if ((long) value > 0xffff || (long) value < -0x8000)
10814 return bfd_reloc_overflow;
10816 bfd_put_16 (input_bfd, value, hit_data);
10817 return bfd_reloc_ok;
10819 case R_ARM_THM_ABS5:
10820 /* Support ldr and str instructions for the thumb. */
10821 if (globals->use_rel)
10823 /* Need to refetch addend. */
10824 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10825 /* ??? Need to determine shift amount from operand size. */
10826 addend >>= howto->rightshift;
10830 /* ??? Isn't value unsigned? */
10831 if ((long) value > 0x1f || (long) value < -0x10)
10832 return bfd_reloc_overflow;
10834 /* ??? Value needs to be properly shifted into place first. */
10835 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10836 bfd_put_16 (input_bfd, value, hit_data);
10837 return bfd_reloc_ok;
10839 case R_ARM_THM_ALU_PREL_11_0:
10840 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10843 bfd_signed_vma relocation;
10845 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10846 | bfd_get_16 (input_bfd, hit_data + 2);
10848 if (globals->use_rel)
10850 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10851 | ((insn & (1 << 26)) >> 15);
10852 if (insn & 0xf00000)
10853 signed_addend = -signed_addend;
10856 relocation = value + signed_addend;
10857 relocation -= Pa (input_section->output_section->vma
10858 + input_section->output_offset
10861 /* PR 21523: Use an absolute value. The user of this reloc will
10862 have already selected an ADD or SUB insn appropriately. */
10863 value = llabs (relocation);
10865 if (value >= 0x1000)
10866 return bfd_reloc_overflow;
10868 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10869 if (branch_type == ST_BRANCH_TO_THUMB)
10872 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10873 | ((value & 0x700) << 4)
10874 | ((value & 0x800) << 15);
10875 if (relocation < 0)
10878 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10879 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10881 return bfd_reloc_ok;
10884 case R_ARM_THM_PC8:
10885 /* PR 10073: This reloc is not generated by the GNU toolchain,
10886 but it is supported for compatibility with third party libraries
10887 generated by other compilers, specifically the ARM/IAR. */
10890 bfd_signed_vma relocation;
10892 insn = bfd_get_16 (input_bfd, hit_data);
10894 if (globals->use_rel)
10895 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
10897 relocation = value + addend;
10898 relocation -= Pa (input_section->output_section->vma
10899 + input_section->output_offset
10902 value = relocation;
10904 /* We do not check for overflow of this reloc. Although strictly
10905 speaking this is incorrect, it appears to be necessary in order
10906 to work with IAR generated relocs. Since GCC and GAS do not
10907 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10908 a problem for them. */
10911 insn = (insn & 0xff00) | (value >> 2);
10913 bfd_put_16 (input_bfd, insn, hit_data);
10915 return bfd_reloc_ok;
10918 case R_ARM_THM_PC12:
10919 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10922 bfd_signed_vma relocation;
10924 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10925 | bfd_get_16 (input_bfd, hit_data + 2);
10927 if (globals->use_rel)
10929 signed_addend = insn & 0xfff;
10930 if (!(insn & (1 << 23)))
10931 signed_addend = -signed_addend;
10934 relocation = value + signed_addend;
10935 relocation -= Pa (input_section->output_section->vma
10936 + input_section->output_offset
10939 value = relocation;
10941 if (value >= 0x1000)
10942 return bfd_reloc_overflow;
10944 insn = (insn & 0xff7ff000) | value;
10945 if (relocation >= 0)
10948 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10949 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10951 return bfd_reloc_ok;
10954 case R_ARM_THM_XPC22:
10955 case R_ARM_THM_CALL:
10956 case R_ARM_THM_JUMP24:
10957 /* Thumb BL (branch long instruction). */
10959 bfd_vma relocation;
10960 bfd_vma reloc_sign;
10961 bfd_boolean overflow = FALSE;
10962 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
10963 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
10964 bfd_signed_vma reloc_signed_max;
10965 bfd_signed_vma reloc_signed_min;
10967 bfd_signed_vma signed_check;
10969 const int thumb2 = using_thumb2 (globals);
10970 const int thumb2_bl = using_thumb2_bl (globals);
10972 /* A branch to an undefined weak symbol is turned into a jump to
10973 the next instruction unless a PLT entry will be created.
10974 The jump to the next instruction is optimized as a NOP.W for
10975 Thumb-2 enabled architectures. */
10976 if (h && h->root.type == bfd_link_hash_undefweak
10977 && plt_offset == (bfd_vma) -1)
10981 bfd_put_16 (input_bfd, 0xf3af, hit_data);
10982 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
10986 bfd_put_16 (input_bfd, 0xe000, hit_data);
10987 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
10989 return bfd_reloc_ok;
10992 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
10993 with Thumb-1) involving the J1 and J2 bits. */
10994 if (globals->use_rel)
10996 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
10997 bfd_vma upper = upper_insn & 0x3ff;
10998 bfd_vma lower = lower_insn & 0x7ff;
10999 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11000 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11001 bfd_vma i1 = j1 ^ s ? 0 : 1;
11002 bfd_vma i2 = j2 ^ s ? 0 : 1;
11004 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11006 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11008 signed_addend = addend;
11011 if (r_type == R_ARM_THM_XPC22)
11013 /* Check for Thumb to Thumb call. */
11014 /* FIXME: Should we translate the instruction into a BL
11015 instruction instead ? */
11016 if (branch_type == ST_BRANCH_TO_THUMB)
11018 (_("%pB: warning: %s BLX instruction targets"
11019 " %s function '%s'"),
11020 input_bfd, "Thumb",
11021 "Thumb", h ? h->root.root.string : "(local)");
11025 /* If it is not a call to Thumb, assume call to Arm.
11026 If it is a call relative to a section name, then it is not a
11027 function call at all, but rather a long jump. Calls through
11028 the PLT do not require stubs. */
11029 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11031 if (globals->use_blx && r_type == R_ARM_THM_CALL)
11033 /* Convert BL to BLX. */
11034 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11036 else if (( r_type != R_ARM_THM_CALL)
11037 && (r_type != R_ARM_THM_JUMP24))
11039 if (elf32_thumb_to_arm_stub
11040 (info, sym_name, input_bfd, output_bfd, input_section,
11041 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11043 return bfd_reloc_ok;
11045 return bfd_reloc_dangerous;
11048 else if (branch_type == ST_BRANCH_TO_THUMB
11049 && globals->use_blx
11050 && r_type == R_ARM_THM_CALL)
11052 /* Make sure this is a BL. */
11053 lower_insn |= 0x1800;
11057 enum elf32_arm_stub_type stub_type = arm_stub_none;
11058 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11060 /* Check if a stub has to be inserted because the destination
11062 struct elf32_arm_stub_hash_entry *stub_entry;
11063 struct elf32_arm_link_hash_entry *hash;
11065 hash = (struct elf32_arm_link_hash_entry *) h;
11067 stub_type = arm_type_of_stub (info, input_section, rel,
11068 st_type, &branch_type,
11069 hash, value, sym_sec,
11070 input_bfd, sym_name);
11072 if (stub_type != arm_stub_none)
11074 /* The target is out of reach or we are changing modes, so
11075 redirect the branch to the local stub for this
11077 stub_entry = elf32_arm_get_stub_entry (input_section,
11081 if (stub_entry != NULL)
11083 value = (stub_entry->stub_offset
11084 + stub_entry->stub_sec->output_offset
11085 + stub_entry->stub_sec->output_section->vma);
11087 if (plt_offset != (bfd_vma) -1)
11088 *unresolved_reloc_p = FALSE;
11091 /* If this call becomes a call to Arm, force BLX. */
11092 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11095 && !arm_stub_is_thumb (stub_entry->stub_type))
11096 || branch_type != ST_BRANCH_TO_THUMB)
11097 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11102 /* Handle calls via the PLT. */
11103 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11105 value = (splt->output_section->vma
11106 + splt->output_offset
11109 if (globals->use_blx
11110 && r_type == R_ARM_THM_CALL
11111 && ! using_thumb_only (globals))
11113 /* If the Thumb BLX instruction is available, convert
11114 the BL to a BLX instruction to call the ARM-mode
11116 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11117 branch_type = ST_BRANCH_TO_ARM;
11121 if (! using_thumb_only (globals))
11122 /* Target the Thumb stub before the ARM PLT entry. */
11123 value -= PLT_THUMB_STUB_SIZE;
11124 branch_type = ST_BRANCH_TO_THUMB;
11126 *unresolved_reloc_p = FALSE;
11129 relocation = value + signed_addend;
11131 relocation -= (input_section->output_section->vma
11132 + input_section->output_offset
11135 check = relocation >> howto->rightshift;
11137 /* If this is a signed value, the rightshift just dropped
11138 leading 1 bits (assuming twos complement). */
11139 if ((bfd_signed_vma) relocation >= 0)
11140 signed_check = check;
11142 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11144 /* Calculate the permissable maximum and minimum values for
11145 this relocation according to whether we're relocating for
11147 bitsize = howto->bitsize;
11150 reloc_signed_max = (1 << (bitsize - 1)) - 1;
11151 reloc_signed_min = ~reloc_signed_max;
11153 /* Assumes two's complement. */
11154 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11157 if ((lower_insn & 0x5000) == 0x4000)
11158 /* For a BLX instruction, make sure that the relocation is rounded up
11159 to a word boundary. This follows the semantics of the instruction
11160 which specifies that bit 1 of the target address will come from bit
11161 1 of the base address. */
11162 relocation = (relocation + 2) & ~ 3;
11164 /* Put RELOCATION back into the insn. Assumes two's complement.
11165 We use the Thumb-2 encoding, which is safe even if dealing with
11166 a Thumb-1 instruction by virtue of our overflow check above. */
11167 reloc_sign = (signed_check < 0) ? 1 : 0;
11168 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11169 | ((relocation >> 12) & 0x3ff)
11170 | (reloc_sign << 10);
11171 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11172 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11173 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11174 | ((relocation >> 1) & 0x7ff);
11176 /* Put the relocated value back in the object file: */
11177 bfd_put_16 (input_bfd, upper_insn, hit_data);
11178 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11180 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11184 case R_ARM_THM_JUMP19:
11185 /* Thumb32 conditional branch instruction. */
11187 bfd_vma relocation;
11188 bfd_boolean overflow = FALSE;
11189 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11190 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11191 bfd_signed_vma reloc_signed_max = 0xffffe;
11192 bfd_signed_vma reloc_signed_min = -0x100000;
11193 bfd_signed_vma signed_check;
11194 enum elf32_arm_stub_type stub_type = arm_stub_none;
11195 struct elf32_arm_stub_hash_entry *stub_entry;
11196 struct elf32_arm_link_hash_entry *hash;
11198 /* Need to refetch the addend, reconstruct the top three bits,
11199 and squish the two 11 bit pieces together. */
11200 if (globals->use_rel)
11202 bfd_vma S = (upper_insn & 0x0400) >> 10;
11203 bfd_vma upper = (upper_insn & 0x003f);
11204 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
11205 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
11206 bfd_vma lower = (lower_insn & 0x07ff);
11210 upper |= (!S) << 8;
11211 upper -= 0x0100; /* Sign extend. */
11213 addend = (upper << 12) | (lower << 1);
11214 signed_addend = addend;
11217 /* Handle calls via the PLT. */
11218 if (plt_offset != (bfd_vma) -1)
11220 value = (splt->output_section->vma
11221 + splt->output_offset
11223 /* Target the Thumb stub before the ARM PLT entry. */
11224 value -= PLT_THUMB_STUB_SIZE;
11225 *unresolved_reloc_p = FALSE;
11228 hash = (struct elf32_arm_link_hash_entry *)h;
11230 stub_type = arm_type_of_stub (info, input_section, rel,
11231 st_type, &branch_type,
11232 hash, value, sym_sec,
11233 input_bfd, sym_name);
11234 if (stub_type != arm_stub_none)
11236 stub_entry = elf32_arm_get_stub_entry (input_section,
11240 if (stub_entry != NULL)
11242 value = (stub_entry->stub_offset
11243 + stub_entry->stub_sec->output_offset
11244 + stub_entry->stub_sec->output_section->vma);
11248 relocation = value + signed_addend;
11249 relocation -= (input_section->output_section->vma
11250 + input_section->output_offset
11252 signed_check = (bfd_signed_vma) relocation;
11254 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11257 /* Put RELOCATION back into the insn. */
11259 bfd_vma S = (relocation & 0x00100000) >> 20;
11260 bfd_vma J2 = (relocation & 0x00080000) >> 19;
11261 bfd_vma J1 = (relocation & 0x00040000) >> 18;
11262 bfd_vma hi = (relocation & 0x0003f000) >> 12;
11263 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
11265 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11266 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11269 /* Put the relocated value back in the object file: */
11270 bfd_put_16 (input_bfd, upper_insn, hit_data);
11271 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11273 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11276 case R_ARM_THM_JUMP11:
11277 case R_ARM_THM_JUMP8:
11278 case R_ARM_THM_JUMP6:
11279 /* Thumb B (branch) instruction). */
11281 bfd_signed_vma relocation;
11282 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11283 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11284 bfd_signed_vma signed_check;
11286 /* CZB cannot jump backward. */
11287 if (r_type == R_ARM_THM_JUMP6)
11289 reloc_signed_min = 0;
11290 if (globals->use_rel)
11291 signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
11294 relocation = value + signed_addend;
11296 relocation -= (input_section->output_section->vma
11297 + input_section->output_offset
11300 relocation >>= howto->rightshift;
11301 signed_check = relocation;
11303 if (r_type == R_ARM_THM_JUMP6)
11304 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11306 relocation &= howto->dst_mask;
11307 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11309 bfd_put_16 (input_bfd, relocation, hit_data);
11311 /* Assumes two's complement. */
11312 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11313 return bfd_reloc_overflow;
11315 return bfd_reloc_ok;
11318 case R_ARM_ALU_PCREL7_0:
11319 case R_ARM_ALU_PCREL15_8:
11320 case R_ARM_ALU_PCREL23_15:
11323 bfd_vma relocation;
11325 insn = bfd_get_32 (input_bfd, hit_data);
11326 if (globals->use_rel)
11328 /* Extract the addend. */
11329 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11330 signed_addend = addend;
11332 relocation = value + signed_addend;
11334 relocation -= (input_section->output_section->vma
11335 + input_section->output_offset
11337 insn = (insn & ~0xfff)
11338 | ((howto->bitpos << 7) & 0xf00)
11339 | ((relocation >> howto->bitpos) & 0xff);
11340 bfd_put_32 (input_bfd, value, hit_data);
11342 return bfd_reloc_ok;
11344 case R_ARM_GNU_VTINHERIT:
11345 case R_ARM_GNU_VTENTRY:
11346 return bfd_reloc_ok;
11348 case R_ARM_GOTOFF32:
11349 /* Relocation is relative to the start of the
11350 global offset table. */
11352 BFD_ASSERT (sgot != NULL);
11354 return bfd_reloc_notsupported;
11356 /* If we are addressing a Thumb function, we need to adjust the
11357 address by one, so that attempts to call the function pointer will
11358 correctly interpret it as Thumb code. */
11359 if (branch_type == ST_BRANCH_TO_THUMB)
11362 /* Note that sgot->output_offset is not involved in this
11363 calculation. We always want the start of .got. If we
11364 define _GLOBAL_OFFSET_TABLE in a different way, as is
11365 permitted by the ABI, we might have to change this
11367 value -= sgot->output_section->vma;
11368 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11369 contents, rel->r_offset, value,
11373 /* Use global offset table as symbol value. */
11374 BFD_ASSERT (sgot != NULL);
11377 return bfd_reloc_notsupported;
11379 *unresolved_reloc_p = FALSE;
11380 value = sgot->output_section->vma;
11381 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11382 contents, rel->r_offset, value,
11386 case R_ARM_GOT_PREL:
11387 /* Relocation is to the entry for this symbol in the
11388 global offset table. */
11390 return bfd_reloc_notsupported;
11392 if (dynreloc_st_type == STT_GNU_IFUNC
11393 && plt_offset != (bfd_vma) -1
11394 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11396 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11397 symbol, and the relocation resolves directly to the runtime
11398 target rather than to the .iplt entry. This means that any
11399 .got entry would be the same value as the .igot.plt entry,
11400 so there's no point creating both. */
11401 sgot = globals->root.igotplt;
11402 value = sgot->output_offset + gotplt_offset;
11404 else if (h != NULL)
11408 off = h->got.offset;
11409 BFD_ASSERT (off != (bfd_vma) -1);
11410 if ((off & 1) != 0)
11412 /* We have already processsed one GOT relocation against
11415 if (globals->root.dynamic_sections_created
11416 && !SYMBOL_REFERENCES_LOCAL (info, h))
11417 *unresolved_reloc_p = FALSE;
11421 Elf_Internal_Rela outrel;
11424 if (((h->dynindx != -1) || globals->fdpic_p)
11425 && !SYMBOL_REFERENCES_LOCAL (info, h))
11427 /* If the symbol doesn't resolve locally in a static
11428 object, we have an undefined reference. If the
11429 symbol doesn't resolve locally in a dynamic object,
11430 it should be resolved by the dynamic linker. */
11431 if (globals->root.dynamic_sections_created)
11433 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11434 *unresolved_reloc_p = FALSE;
11438 outrel.r_addend = 0;
11442 if (dynreloc_st_type == STT_GNU_IFUNC)
11443 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11444 else if (bfd_link_pic (info)
11445 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
11446 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11450 if (globals->fdpic_p)
11453 outrel.r_addend = dynreloc_value;
11456 /* The GOT entry is initialized to zero by default.
11457 See if we should install a different value. */
11458 if (outrel.r_addend != 0
11459 && (globals->use_rel || outrel.r_info == 0))
11461 bfd_put_32 (output_bfd, outrel.r_addend,
11462 sgot->contents + off);
11463 outrel.r_addend = 0;
11467 arm_elf_add_rofixup (output_bfd,
11468 elf32_arm_hash_table(info)->srofixup,
11469 sgot->output_section->vma
11470 + sgot->output_offset + off);
11472 else if (outrel.r_info != 0)
11474 outrel.r_offset = (sgot->output_section->vma
11475 + sgot->output_offset
11477 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11480 h->got.offset |= 1;
11482 value = sgot->output_offset + off;
11488 BFD_ASSERT (local_got_offsets != NULL
11489 && local_got_offsets[r_symndx] != (bfd_vma) -1);
11491 off = local_got_offsets[r_symndx];
11493 /* The offset must always be a multiple of 4. We use the
11494 least significant bit to record whether we have already
11495 generated the necessary reloc. */
11496 if ((off & 1) != 0)
11500 Elf_Internal_Rela outrel;
11503 if (dynreloc_st_type == STT_GNU_IFUNC)
11504 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11505 else if (bfd_link_pic (info))
11506 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11510 if (globals->fdpic_p)
11514 /* The GOT entry is initialized to zero by default.
11515 See if we should install a different value. */
11516 if (globals->use_rel || outrel.r_info == 0)
11517 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11520 arm_elf_add_rofixup (output_bfd,
11522 sgot->output_section->vma
11523 + sgot->output_offset + off);
11525 else if (outrel.r_info != 0)
11527 outrel.r_addend = addend + dynreloc_value;
11528 outrel.r_offset = (sgot->output_section->vma
11529 + sgot->output_offset
11531 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11534 local_got_offsets[r_symndx] |= 1;
11537 value = sgot->output_offset + off;
11539 if (r_type != R_ARM_GOT32)
11540 value += sgot->output_section->vma;
11542 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11543 contents, rel->r_offset, value,
11546 case R_ARM_TLS_LDO32:
11547 value = value - dtpoff_base (info);
11549 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11550 contents, rel->r_offset, value,
11553 case R_ARM_TLS_LDM32:
11554 case R_ARM_TLS_LDM32_FDPIC:
11561 off = globals->tls_ldm_got.offset;
11563 if ((off & 1) != 0)
11567 /* If we don't know the module number, create a relocation
11569 if (bfd_link_dll (info))
11571 Elf_Internal_Rela outrel;
11573 if (srelgot == NULL)
11576 outrel.r_addend = 0;
11577 outrel.r_offset = (sgot->output_section->vma
11578 + sgot->output_offset + off);
11579 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11581 if (globals->use_rel)
11582 bfd_put_32 (output_bfd, outrel.r_addend,
11583 sgot->contents + off);
11585 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11588 bfd_put_32 (output_bfd, 1, sgot->contents + off);
11590 globals->tls_ldm_got.offset |= 1;
11593 if (r_type == R_ARM_TLS_LDM32_FDPIC)
11595 bfd_put_32(output_bfd,
11596 globals->root.sgot->output_offset + off,
11597 contents + rel->r_offset);
11599 return bfd_reloc_ok;
11603 value = sgot->output_section->vma + sgot->output_offset + off
11604 - (input_section->output_section->vma
11605 + input_section->output_offset + rel->r_offset);
11607 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11608 contents, rel->r_offset, value,
11613 case R_ARM_TLS_CALL:
11614 case R_ARM_THM_TLS_CALL:
11615 case R_ARM_TLS_GD32:
11616 case R_ARM_TLS_GD32_FDPIC:
11617 case R_ARM_TLS_IE32:
11618 case R_ARM_TLS_IE32_FDPIC:
11619 case R_ARM_TLS_GOTDESC:
11620 case R_ARM_TLS_DESCSEQ:
11621 case R_ARM_THM_TLS_DESCSEQ:
11623 bfd_vma off, offplt;
11627 BFD_ASSERT (sgot != NULL);
11632 dyn = globals->root.dynamic_sections_created;
11633 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11634 bfd_link_pic (info),
11636 && (!bfd_link_pic (info)
11637 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11639 *unresolved_reloc_p = FALSE;
11642 off = h->got.offset;
11643 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11644 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11648 BFD_ASSERT (local_got_offsets != NULL);
11649 off = local_got_offsets[r_symndx];
11650 offplt = local_tlsdesc_gotents[r_symndx];
11651 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11654 /* Linker relaxations happens from one of the
11655 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11656 if (ELF32_R_TYPE(rel->r_info) != r_type)
11657 tls_type = GOT_TLS_IE;
11659 BFD_ASSERT (tls_type != GOT_UNKNOWN);
11661 if ((off & 1) != 0)
11665 bfd_boolean need_relocs = FALSE;
11666 Elf_Internal_Rela outrel;
11669 /* The GOT entries have not been initialized yet. Do it
11670 now, and emit any relocations. If both an IE GOT and a
11671 GD GOT are necessary, we emit the GD first. */
11673 if ((bfd_link_dll (info) || indx != 0)
11675 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11676 && !resolved_to_zero)
11677 || h->root.type != bfd_link_hash_undefweak))
11679 need_relocs = TRUE;
11680 BFD_ASSERT (srelgot != NULL);
11683 if (tls_type & GOT_TLS_GDESC)
11687 /* We should have relaxed, unless this is an undefined
11689 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11690 || bfd_link_dll (info));
11691 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11692 <= globals->root.sgotplt->size);
11694 outrel.r_addend = 0;
11695 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11696 + globals->root.sgotplt->output_offset
11698 + globals->sgotplt_jump_table_size);
11700 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11701 sreloc = globals->root.srelplt;
11702 loc = sreloc->contents;
11703 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11704 BFD_ASSERT (loc + RELOC_SIZE (globals)
11705 <= sreloc->contents + sreloc->size);
11707 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11709 /* For globals, the first word in the relocation gets
11710 the relocation index and the top bit set, or zero,
11711 if we're binding now. For locals, it gets the
11712 symbol's offset in the tls section. */
11713 bfd_put_32 (output_bfd,
11714 !h ? value - elf_hash_table (info)->tls_sec->vma
11715 : info->flags & DF_BIND_NOW ? 0
11716 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11717 globals->root.sgotplt->contents + offplt
11718 + globals->sgotplt_jump_table_size);
11720 /* Second word in the relocation is always zero. */
11721 bfd_put_32 (output_bfd, 0,
11722 globals->root.sgotplt->contents + offplt
11723 + globals->sgotplt_jump_table_size + 4);
11725 if (tls_type & GOT_TLS_GD)
11729 outrel.r_addend = 0;
11730 outrel.r_offset = (sgot->output_section->vma
11731 + sgot->output_offset
11733 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11735 if (globals->use_rel)
11736 bfd_put_32 (output_bfd, outrel.r_addend,
11737 sgot->contents + cur_off);
11739 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11742 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11743 sgot->contents + cur_off + 4);
11746 outrel.r_addend = 0;
11747 outrel.r_info = ELF32_R_INFO (indx,
11748 R_ARM_TLS_DTPOFF32);
11749 outrel.r_offset += 4;
11751 if (globals->use_rel)
11752 bfd_put_32 (output_bfd, outrel.r_addend,
11753 sgot->contents + cur_off + 4);
11755 elf32_arm_add_dynreloc (output_bfd, info,
11761 /* If we are not emitting relocations for a
11762 general dynamic reference, then we must be in a
11763 static link or an executable link with the
11764 symbol binding locally. Mark it as belonging
11765 to module 1, the executable. */
11766 bfd_put_32 (output_bfd, 1,
11767 sgot->contents + cur_off);
11768 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11769 sgot->contents + cur_off + 4);
11775 if (tls_type & GOT_TLS_IE)
11780 outrel.r_addend = value - dtpoff_base (info);
11782 outrel.r_addend = 0;
11783 outrel.r_offset = (sgot->output_section->vma
11784 + sgot->output_offset
11786 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11788 if (globals->use_rel)
11789 bfd_put_32 (output_bfd, outrel.r_addend,
11790 sgot->contents + cur_off);
11792 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11795 bfd_put_32 (output_bfd, tpoff (info, value),
11796 sgot->contents + cur_off);
11801 h->got.offset |= 1;
11803 local_got_offsets[r_symndx] |= 1;
11806 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11808 else if (tls_type & GOT_TLS_GDESC)
11811 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
11812 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
11814 bfd_signed_vma offset;
11815 /* TLS stubs are arm mode. The original symbol is a
11816 data object, so branch_type is bogus. */
11817 branch_type = ST_BRANCH_TO_ARM;
11818 enum elf32_arm_stub_type stub_type
11819 = arm_type_of_stub (info, input_section, rel,
11820 st_type, &branch_type,
11821 (struct elf32_arm_link_hash_entry *)h,
11822 globals->tls_trampoline, globals->root.splt,
11823 input_bfd, sym_name);
11825 if (stub_type != arm_stub_none)
11827 struct elf32_arm_stub_hash_entry *stub_entry
11828 = elf32_arm_get_stub_entry
11829 (input_section, globals->root.splt, 0, rel,
11830 globals, stub_type);
11831 offset = (stub_entry->stub_offset
11832 + stub_entry->stub_sec->output_offset
11833 + stub_entry->stub_sec->output_section->vma);
11836 offset = (globals->root.splt->output_section->vma
11837 + globals->root.splt->output_offset
11838 + globals->tls_trampoline);
11840 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
11842 unsigned long inst;
11844 offset -= (input_section->output_section->vma
11845 + input_section->output_offset
11846 + rel->r_offset + 8);
11848 inst = offset >> 2;
11849 inst &= 0x00ffffff;
11850 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11854 /* Thumb blx encodes the offset in a complicated
11856 unsigned upper_insn, lower_insn;
11859 offset -= (input_section->output_section->vma
11860 + input_section->output_offset
11861 + rel->r_offset + 4);
11863 if (stub_type != arm_stub_none
11864 && arm_stub_is_thumb (stub_type))
11866 lower_insn = 0xd000;
11870 lower_insn = 0xc000;
11871 /* Round up the offset to a word boundary. */
11872 offset = (offset + 2) & ~2;
11876 upper_insn = (0xf000
11877 | ((offset >> 12) & 0x3ff)
11879 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
11880 | (((!((offset >> 22) & 1)) ^ neg) << 11)
11881 | ((offset >> 1) & 0x7ff);
11882 bfd_put_16 (input_bfd, upper_insn, hit_data);
11883 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11884 return bfd_reloc_ok;
11887 /* These relocations needs special care, as besides the fact
11888 they point somewhere in .gotplt, the addend must be
11889 adjusted accordingly depending on the type of instruction
11891 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
11893 unsigned long data, insn;
11896 data = bfd_get_signed_32 (input_bfd, hit_data);
11902 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
11903 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
11904 insn = (insn << 16)
11905 | bfd_get_16 (input_bfd,
11906 contents + rel->r_offset - data + 2);
11907 if ((insn & 0xf800c000) == 0xf000c000)
11910 else if ((insn & 0xffffff00) == 0x4400)
11916 /* xgettext:c-format */
11917 (_("%pB(%pA+%#" PRIx64 "): "
11918 "unexpected %s instruction '%#lx' "
11919 "referenced by TLS_GOTDESC"),
11920 input_bfd, input_section, (uint64_t) rel->r_offset,
11922 return bfd_reloc_notsupported;
11927 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
11929 switch (insn >> 24)
11931 case 0xeb: /* bl */
11932 case 0xfa: /* blx */
11936 case 0xe0: /* add */
11942 /* xgettext:c-format */
11943 (_("%pB(%pA+%#" PRIx64 "): "
11944 "unexpected %s instruction '%#lx' "
11945 "referenced by TLS_GOTDESC"),
11946 input_bfd, input_section, (uint64_t) rel->r_offset,
11948 return bfd_reloc_notsupported;
11952 value += ((globals->root.sgotplt->output_section->vma
11953 + globals->root.sgotplt->output_offset + off)
11954 - (input_section->output_section->vma
11955 + input_section->output_offset
11957 + globals->sgotplt_jump_table_size);
11960 value = ((globals->root.sgot->output_section->vma
11961 + globals->root.sgot->output_offset + off)
11962 - (input_section->output_section->vma
11963 + input_section->output_offset + rel->r_offset));
11965 if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
11966 r_type == R_ARM_TLS_IE32_FDPIC))
11968 /* For FDPIC relocations, resolve to the offset of the GOT
11969 entry from the start of GOT. */
11970 bfd_put_32(output_bfd,
11971 globals->root.sgot->output_offset + off,
11972 contents + rel->r_offset);
11974 return bfd_reloc_ok;
11978 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11979 contents, rel->r_offset, value,
11984 case R_ARM_TLS_LE32:
11985 if (bfd_link_dll (info))
11988 /* xgettext:c-format */
11989 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
11990 "in shared object"),
11991 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
11992 return bfd_reloc_notsupported;
11995 value = tpoff (info, value);
11997 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11998 contents, rel->r_offset, value,
12002 if (globals->fix_v4bx)
12004 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12006 /* Ensure that we have a BX instruction. */
12007 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12009 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12011 /* Branch to veneer. */
12013 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12014 glue_addr -= input_section->output_section->vma
12015 + input_section->output_offset
12016 + rel->r_offset + 8;
12017 insn = (insn & 0xf0000000) | 0x0a000000
12018 | ((glue_addr >> 2) & 0x00ffffff);
12022 /* Preserve Rm (lowest four bits) and the condition code
12023 (highest four bits). Other bits encode MOV PC,Rm. */
12024 insn = (insn & 0xf000000f) | 0x01a0f000;
12027 bfd_put_32 (input_bfd, insn, hit_data);
12029 return bfd_reloc_ok;
12031 case R_ARM_MOVW_ABS_NC:
12032 case R_ARM_MOVT_ABS:
12033 case R_ARM_MOVW_PREL_NC:
12034 case R_ARM_MOVT_PREL:
12035 /* Until we properly support segment-base-relative addressing then
12036 we assume the segment base to be zero, as for the group relocations.
12037 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12038 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12039 case R_ARM_MOVW_BREL_NC:
12040 case R_ARM_MOVW_BREL:
12041 case R_ARM_MOVT_BREL:
12043 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12045 if (globals->use_rel)
12047 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12048 signed_addend = (addend ^ 0x8000) - 0x8000;
12051 value += signed_addend;
12053 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12054 value -= (input_section->output_section->vma
12055 + input_section->output_offset + rel->r_offset);
12057 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12058 return bfd_reloc_overflow;
12060 if (branch_type == ST_BRANCH_TO_THUMB)
12063 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12064 || r_type == R_ARM_MOVT_BREL)
12067 insn &= 0xfff0f000;
12068 insn |= value & 0xfff;
12069 insn |= (value & 0xf000) << 4;
12070 bfd_put_32 (input_bfd, insn, hit_data);
12072 return bfd_reloc_ok;
12074 case R_ARM_THM_MOVW_ABS_NC:
12075 case R_ARM_THM_MOVT_ABS:
12076 case R_ARM_THM_MOVW_PREL_NC:
12077 case R_ARM_THM_MOVT_PREL:
12078 /* Until we properly support segment-base-relative addressing then
12079 we assume the segment base to be zero, as for the above relocations.
12080 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12081 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12082 as R_ARM_THM_MOVT_ABS. */
12083 case R_ARM_THM_MOVW_BREL_NC:
12084 case R_ARM_THM_MOVW_BREL:
12085 case R_ARM_THM_MOVT_BREL:
12089 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12090 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12092 if (globals->use_rel)
12094 addend = ((insn >> 4) & 0xf000)
12095 | ((insn >> 15) & 0x0800)
12096 | ((insn >> 4) & 0x0700)
12098 signed_addend = (addend ^ 0x8000) - 0x8000;
12101 value += signed_addend;
12103 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12104 value -= (input_section->output_section->vma
12105 + input_section->output_offset + rel->r_offset);
12107 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12108 return bfd_reloc_overflow;
12110 if (branch_type == ST_BRANCH_TO_THUMB)
12113 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12114 || r_type == R_ARM_THM_MOVT_BREL)
12117 insn &= 0xfbf08f00;
12118 insn |= (value & 0xf000) << 4;
12119 insn |= (value & 0x0800) << 15;
12120 insn |= (value & 0x0700) << 4;
12121 insn |= (value & 0x00ff);
12123 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12124 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12126 return bfd_reloc_ok;
12128 case R_ARM_ALU_PC_G0_NC:
12129 case R_ARM_ALU_PC_G1_NC:
12130 case R_ARM_ALU_PC_G0:
12131 case R_ARM_ALU_PC_G1:
12132 case R_ARM_ALU_PC_G2:
12133 case R_ARM_ALU_SB_G0_NC:
12134 case R_ARM_ALU_SB_G1_NC:
12135 case R_ARM_ALU_SB_G0:
12136 case R_ARM_ALU_SB_G1:
12137 case R_ARM_ALU_SB_G2:
12139 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12140 bfd_vma pc = input_section->output_section->vma
12141 + input_section->output_offset + rel->r_offset;
12142 /* sb is the origin of the *segment* containing the symbol. */
12143 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12146 bfd_signed_vma signed_value;
12149 /* Determine which group of bits to select. */
12152 case R_ARM_ALU_PC_G0_NC:
12153 case R_ARM_ALU_PC_G0:
12154 case R_ARM_ALU_SB_G0_NC:
12155 case R_ARM_ALU_SB_G0:
12159 case R_ARM_ALU_PC_G1_NC:
12160 case R_ARM_ALU_PC_G1:
12161 case R_ARM_ALU_SB_G1_NC:
12162 case R_ARM_ALU_SB_G1:
12166 case R_ARM_ALU_PC_G2:
12167 case R_ARM_ALU_SB_G2:
12175 /* If REL, extract the addend from the insn. If RELA, it will
12176 have already been fetched for us. */
12177 if (globals->use_rel)
12180 bfd_vma constant = insn & 0xff;
12181 bfd_vma rotation = (insn & 0xf00) >> 8;
12184 signed_addend = constant;
12187 /* Compensate for the fact that in the instruction, the
12188 rotation is stored in multiples of 2 bits. */
12191 /* Rotate "constant" right by "rotation" bits. */
12192 signed_addend = (constant >> rotation) |
12193 (constant << (8 * sizeof (bfd_vma) - rotation));
12196 /* Determine if the instruction is an ADD or a SUB.
12197 (For REL, this determines the sign of the addend.) */
12198 negative = identify_add_or_sub (insn);
12202 /* xgettext:c-format */
12203 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12204 "are allowed for ALU group relocations"),
12205 input_bfd, input_section, (uint64_t) rel->r_offset);
12206 return bfd_reloc_overflow;
12209 signed_addend *= negative;
12212 /* Compute the value (X) to go in the place. */
12213 if (r_type == R_ARM_ALU_PC_G0_NC
12214 || r_type == R_ARM_ALU_PC_G1_NC
12215 || r_type == R_ARM_ALU_PC_G0
12216 || r_type == R_ARM_ALU_PC_G1
12217 || r_type == R_ARM_ALU_PC_G2)
12219 signed_value = value - pc + signed_addend;
12221 /* Section base relative. */
12222 signed_value = value - sb + signed_addend;
12224 /* If the target symbol is a Thumb function, then set the
12225 Thumb bit in the address. */
12226 if (branch_type == ST_BRANCH_TO_THUMB)
12229 /* Calculate the value of the relevant G_n, in encoded
12230 constant-with-rotation format. */
12231 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12234 /* Check for overflow if required. */
12235 if ((r_type == R_ARM_ALU_PC_G0
12236 || r_type == R_ARM_ALU_PC_G1
12237 || r_type == R_ARM_ALU_PC_G2
12238 || r_type == R_ARM_ALU_SB_G0
12239 || r_type == R_ARM_ALU_SB_G1
12240 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12243 /* xgettext:c-format */
12244 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12245 "splitting %#" PRIx64 " for group relocation %s"),
12246 input_bfd, input_section, (uint64_t) rel->r_offset,
12247 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12249 return bfd_reloc_overflow;
12252 /* Mask out the value and the ADD/SUB part of the opcode; take care
12253 not to destroy the S bit. */
12254 insn &= 0xff1ff000;
12256 /* Set the opcode according to whether the value to go in the
12257 place is negative. */
12258 if (signed_value < 0)
12263 /* Encode the offset. */
12266 bfd_put_32 (input_bfd, insn, hit_data);
12268 return bfd_reloc_ok;
12270 case R_ARM_LDR_PC_G0:
12271 case R_ARM_LDR_PC_G1:
12272 case R_ARM_LDR_PC_G2:
12273 case R_ARM_LDR_SB_G0:
12274 case R_ARM_LDR_SB_G1:
12275 case R_ARM_LDR_SB_G2:
12277 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12278 bfd_vma pc = input_section->output_section->vma
12279 + input_section->output_offset + rel->r_offset;
12280 /* sb is the origin of the *segment* containing the symbol. */
12281 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12283 bfd_signed_vma signed_value;
12286 /* Determine which groups of bits to calculate. */
12289 case R_ARM_LDR_PC_G0:
12290 case R_ARM_LDR_SB_G0:
12294 case R_ARM_LDR_PC_G1:
12295 case R_ARM_LDR_SB_G1:
12299 case R_ARM_LDR_PC_G2:
12300 case R_ARM_LDR_SB_G2:
12308 /* If REL, extract the addend from the insn. If RELA, it will
12309 have already been fetched for us. */
12310 if (globals->use_rel)
12312 int negative = (insn & (1 << 23)) ? 1 : -1;
12313 signed_addend = negative * (insn & 0xfff);
12316 /* Compute the value (X) to go in the place. */
12317 if (r_type == R_ARM_LDR_PC_G0
12318 || r_type == R_ARM_LDR_PC_G1
12319 || r_type == R_ARM_LDR_PC_G2)
12321 signed_value = value - pc + signed_addend;
12323 /* Section base relative. */
12324 signed_value = value - sb + signed_addend;
12326 /* Calculate the value of the relevant G_{n-1} to obtain
12327 the residual at that stage. */
12328 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12329 group - 1, &residual);
12331 /* Check for overflow. */
12332 if (residual >= 0x1000)
12335 /* xgettext:c-format */
12336 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12337 "splitting %#" PRIx64 " for group relocation %s"),
12338 input_bfd, input_section, (uint64_t) rel->r_offset,
12339 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12341 return bfd_reloc_overflow;
12344 /* Mask out the value and U bit. */
12345 insn &= 0xff7ff000;
12347 /* Set the U bit if the value to go in the place is non-negative. */
12348 if (signed_value >= 0)
12351 /* Encode the offset. */
12354 bfd_put_32 (input_bfd, insn, hit_data);
12356 return bfd_reloc_ok;
12358 case R_ARM_LDRS_PC_G0:
12359 case R_ARM_LDRS_PC_G1:
12360 case R_ARM_LDRS_PC_G2:
12361 case R_ARM_LDRS_SB_G0:
12362 case R_ARM_LDRS_SB_G1:
12363 case R_ARM_LDRS_SB_G2:
12365 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12366 bfd_vma pc = input_section->output_section->vma
12367 + input_section->output_offset + rel->r_offset;
12368 /* sb is the origin of the *segment* containing the symbol. */
12369 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12371 bfd_signed_vma signed_value;
12374 /* Determine which groups of bits to calculate. */
12377 case R_ARM_LDRS_PC_G0:
12378 case R_ARM_LDRS_SB_G0:
12382 case R_ARM_LDRS_PC_G1:
12383 case R_ARM_LDRS_SB_G1:
12387 case R_ARM_LDRS_PC_G2:
12388 case R_ARM_LDRS_SB_G2:
12396 /* If REL, extract the addend from the insn. If RELA, it will
12397 have already been fetched for us. */
12398 if (globals->use_rel)
12400 int negative = (insn & (1 << 23)) ? 1 : -1;
12401 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12404 /* Compute the value (X) to go in the place. */
12405 if (r_type == R_ARM_LDRS_PC_G0
12406 || r_type == R_ARM_LDRS_PC_G1
12407 || r_type == R_ARM_LDRS_PC_G2)
12409 signed_value = value - pc + signed_addend;
12411 /* Section base relative. */
12412 signed_value = value - sb + signed_addend;
12414 /* Calculate the value of the relevant G_{n-1} to obtain
12415 the residual at that stage. */
12416 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12417 group - 1, &residual);
12419 /* Check for overflow. */
12420 if (residual >= 0x100)
12423 /* xgettext:c-format */
12424 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12425 "splitting %#" PRIx64 " for group relocation %s"),
12426 input_bfd, input_section, (uint64_t) rel->r_offset,
12427 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12429 return bfd_reloc_overflow;
12432 /* Mask out the value and U bit. */
12433 insn &= 0xff7ff0f0;
12435 /* Set the U bit if the value to go in the place is non-negative. */
12436 if (signed_value >= 0)
12439 /* Encode the offset. */
12440 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12442 bfd_put_32 (input_bfd, insn, hit_data);
12444 return bfd_reloc_ok;
12446 case R_ARM_LDC_PC_G0:
12447 case R_ARM_LDC_PC_G1:
12448 case R_ARM_LDC_PC_G2:
12449 case R_ARM_LDC_SB_G0:
12450 case R_ARM_LDC_SB_G1:
12451 case R_ARM_LDC_SB_G2:
12453 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12454 bfd_vma pc = input_section->output_section->vma
12455 + input_section->output_offset + rel->r_offset;
12456 /* sb is the origin of the *segment* containing the symbol. */
12457 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12459 bfd_signed_vma signed_value;
12462 /* Determine which groups of bits to calculate. */
12465 case R_ARM_LDC_PC_G0:
12466 case R_ARM_LDC_SB_G0:
12470 case R_ARM_LDC_PC_G1:
12471 case R_ARM_LDC_SB_G1:
12475 case R_ARM_LDC_PC_G2:
12476 case R_ARM_LDC_SB_G2:
12484 /* If REL, extract the addend from the insn. If RELA, it will
12485 have already been fetched for us. */
12486 if (globals->use_rel)
12488 int negative = (insn & (1 << 23)) ? 1 : -1;
12489 signed_addend = negative * ((insn & 0xff) << 2);
12492 /* Compute the value (X) to go in the place. */
12493 if (r_type == R_ARM_LDC_PC_G0
12494 || r_type == R_ARM_LDC_PC_G1
12495 || r_type == R_ARM_LDC_PC_G2)
12497 signed_value = value - pc + signed_addend;
12499 /* Section base relative. */
12500 signed_value = value - sb + signed_addend;
12502 /* Calculate the value of the relevant G_{n-1} to obtain
12503 the residual at that stage. */
12504 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12505 group - 1, &residual);
12507 /* Check for overflow. (The absolute value to go in the place must be
12508 divisible by four and, after having been divided by four, must
12509 fit in eight bits.) */
12510 if ((residual & 0x3) != 0 || residual >= 0x400)
12513 /* xgettext:c-format */
12514 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12515 "splitting %#" PRIx64 " for group relocation %s"),
12516 input_bfd, input_section, (uint64_t) rel->r_offset,
12517 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12519 return bfd_reloc_overflow;
12522 /* Mask out the value and U bit. */
12523 insn &= 0xff7fff00;
12525 /* Set the U bit if the value to go in the place is non-negative. */
12526 if (signed_value >= 0)
12529 /* Encode the offset. */
12530 insn |= residual >> 2;
12532 bfd_put_32 (input_bfd, insn, hit_data);
12534 return bfd_reloc_ok;
12536 case R_ARM_THM_ALU_ABS_G0_NC:
12537 case R_ARM_THM_ALU_ABS_G1_NC:
12538 case R_ARM_THM_ALU_ABS_G2_NC:
12539 case R_ARM_THM_ALU_ABS_G3_NC:
12541 const int shift_array[4] = {0, 8, 16, 24};
12542 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12543 bfd_vma addr = value;
12544 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12546 /* Compute address. */
12547 if (globals->use_rel)
12548 signed_addend = insn & 0xff;
12549 addr += signed_addend;
12550 if (branch_type == ST_BRANCH_TO_THUMB)
12552 /* Clean imm8 insn. */
12554 /* And update with correct part of address. */
12555 insn |= (addr >> shift) & 0xff;
12557 bfd_put_16 (input_bfd, insn, hit_data);
12560 *unresolved_reloc_p = FALSE;
12561 return bfd_reloc_ok;
12563 case R_ARM_GOTOFFFUNCDESC:
12567 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12568 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12569 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12570 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12573 if (bfd_link_pic(info) && dynindx == 0)
12576 /* Resolve relocation. */
12577 bfd_put_32(output_bfd, (offset + sgot->output_offset)
12578 , contents + rel->r_offset);
12579 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12581 arm_elf_fill_funcdesc(output_bfd, info,
12582 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12583 dynindx, offset, addr, dynreloc_value, seg);
12588 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12592 /* For static binaries, sym_sec can be null. */
12595 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12596 addr = dynreloc_value - sym_sec->output_section->vma;
12604 if (bfd_link_pic(info) && dynindx == 0)
12607 /* This case cannot occur since funcdesc is allocated by
12608 the dynamic loader so we cannot resolve the relocation. */
12609 if (h->dynindx != -1)
12612 /* Resolve relocation. */
12613 bfd_put_32(output_bfd, (offset + sgot->output_offset),
12614 contents + rel->r_offset);
12615 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12616 arm_elf_fill_funcdesc(output_bfd, info,
12617 &eh->fdpic_cnts.funcdesc_offset,
12618 dynindx, offset, addr, dynreloc_value, seg);
12621 *unresolved_reloc_p = FALSE;
12622 return bfd_reloc_ok;
12624 case R_ARM_GOTFUNCDESC:
12628 Elf_Internal_Rela outrel;
12630 /* Resolve relocation. */
12631 bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12632 + sgot->output_offset),
12633 contents + rel->r_offset);
12634 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12635 if(h->dynindx == -1)
12638 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12642 /* For static binaries sym_sec can be null. */
12645 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12646 addr = dynreloc_value - sym_sec->output_section->vma;
12654 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12655 arm_elf_fill_funcdesc(output_bfd, info,
12656 &eh->fdpic_cnts.funcdesc_offset,
12657 dynindx, offset, addr, dynreloc_value, seg);
12660 /* Add a dynamic relocation on GOT entry if not already done. */
12661 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12663 if (h->dynindx == -1)
12665 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12666 if (h->root.type == bfd_link_hash_undefweak)
12667 bfd_put_32(output_bfd, 0, sgot->contents
12668 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12670 bfd_put_32(output_bfd, sgot->output_section->vma
12671 + sgot->output_offset
12672 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12674 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12678 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12680 outrel.r_offset = sgot->output_section->vma
12681 + sgot->output_offset
12682 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12683 outrel.r_addend = 0;
12684 if (h->dynindx == -1 && !bfd_link_pic(info))
12685 if (h->root.type == bfd_link_hash_undefweak)
12686 arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12688 arm_elf_add_rofixup(output_bfd, globals->srofixup,
12691 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12692 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12697 /* Such relocation on static function should not have been
12698 emitted by the compiler. */
12702 *unresolved_reloc_p = FALSE;
12703 return bfd_reloc_ok;
12705 case R_ARM_FUNCDESC:
12709 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12710 Elf_Internal_Rela outrel;
12711 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12712 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12713 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12716 if (bfd_link_pic(info) && dynindx == 0)
12719 /* Replace static FUNCDESC relocation with a
12720 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12722 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12723 outrel.r_offset = input_section->output_section->vma
12724 + input_section->output_offset + rel->r_offset;
12725 outrel.r_addend = 0;
12726 if (bfd_link_pic(info))
12727 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12729 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12731 bfd_put_32 (input_bfd, sgot->output_section->vma
12732 + sgot->output_offset + offset, hit_data);
12734 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12735 arm_elf_fill_funcdesc(output_bfd, info,
12736 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12737 dynindx, offset, addr, dynreloc_value, seg);
12741 if (h->dynindx == -1)
12744 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12747 Elf_Internal_Rela outrel;
12749 /* For static binaries sym_sec can be null. */
12752 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12753 addr = dynreloc_value - sym_sec->output_section->vma;
12761 if (bfd_link_pic(info) && dynindx == 0)
12764 /* Replace static FUNCDESC relocation with a
12765 R_ARM_RELATIVE dynamic relocation. */
12766 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12767 outrel.r_offset = input_section->output_section->vma
12768 + input_section->output_offset + rel->r_offset;
12769 outrel.r_addend = 0;
12770 if (bfd_link_pic(info))
12771 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12773 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12775 bfd_put_32 (input_bfd, sgot->output_section->vma
12776 + sgot->output_offset + offset, hit_data);
12778 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12779 arm_elf_fill_funcdesc(output_bfd, info,
12780 &eh->fdpic_cnts.funcdesc_offset,
12781 dynindx, offset, addr, dynreloc_value, seg);
12785 Elf_Internal_Rela outrel;
12787 /* Add a dynamic relocation. */
12788 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12789 outrel.r_offset = input_section->output_section->vma
12790 + input_section->output_offset + rel->r_offset;
12791 outrel.r_addend = 0;
12792 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12796 *unresolved_reloc_p = FALSE;
12797 return bfd_reloc_ok;
12799 case R_ARM_THM_BF16:
12801 bfd_vma relocation;
12802 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12803 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12805 if (globals->use_rel)
12807 bfd_vma immA = (upper_insn & 0x001f);
12808 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12809 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12810 addend = (immA << 12);
12811 addend |= (immB << 2);
12812 addend |= (immC << 1);
12815 signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12818 relocation = value + signed_addend;
12819 relocation -= (input_section->output_section->vma
12820 + input_section->output_offset
12823 /* Put RELOCATION back into the insn. */
12825 bfd_vma immA = (relocation & 0x0001f000) >> 12;
12826 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12827 bfd_vma immC = (relocation & 0x00000002) >> 1;
12829 upper_insn = (upper_insn & 0xffe0) | immA;
12830 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12833 /* Put the relocated value back in the object file: */
12834 bfd_put_16 (input_bfd, upper_insn, hit_data);
12835 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12837 return bfd_reloc_ok;
12840 case R_ARM_THM_BF12:
12842 bfd_vma relocation;
12843 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12844 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12846 if (globals->use_rel)
12848 bfd_vma immA = (upper_insn & 0x0001);
12849 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12850 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12851 addend = (immA << 12);
12852 addend |= (immB << 2);
12853 addend |= (immC << 1);
12856 addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12857 signed_addend = addend;
12860 relocation = value + signed_addend;
12861 relocation -= (input_section->output_section->vma
12862 + input_section->output_offset
12865 /* Put RELOCATION back into the insn. */
12867 bfd_vma immA = (relocation & 0x00001000) >> 12;
12868 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12869 bfd_vma immC = (relocation & 0x00000002) >> 1;
12871 upper_insn = (upper_insn & 0xfffe) | immA;
12872 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12875 /* Put the relocated value back in the object file: */
12876 bfd_put_16 (input_bfd, upper_insn, hit_data);
12877 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12879 return bfd_reloc_ok;
12882 case R_ARM_THM_BF18:
12884 bfd_vma relocation;
12885 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12886 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12888 if (globals->use_rel)
12890 bfd_vma immA = (upper_insn & 0x007f);
12891 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12892 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12893 addend = (immA << 12);
12894 addend |= (immB << 2);
12895 addend |= (immC << 1);
12898 addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
12899 signed_addend = addend;
12902 relocation = value + signed_addend;
12903 relocation -= (input_section->output_section->vma
12904 + input_section->output_offset
12907 /* Put RELOCATION back into the insn. */
12909 bfd_vma immA = (relocation & 0x0007f000) >> 12;
12910 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12911 bfd_vma immC = (relocation & 0x00000002) >> 1;
12913 upper_insn = (upper_insn & 0xff80) | immA;
12914 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12917 /* Put the relocated value back in the object file: */
12918 bfd_put_16 (input_bfd, upper_insn, hit_data);
12919 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12921 return bfd_reloc_ok;
12925 return bfd_reloc_notsupported;
12929 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
12931 arm_add_to_rel (bfd * abfd,
12932 bfd_byte * address,
12933 reloc_howto_type * howto,
12934 bfd_signed_vma increment)
12936 bfd_signed_vma addend;
12938 if (howto->type == R_ARM_THM_CALL
12939 || howto->type == R_ARM_THM_JUMP24)
12941 int upper_insn, lower_insn;
12944 upper_insn = bfd_get_16 (abfd, address);
12945 lower_insn = bfd_get_16 (abfd, address + 2);
12946 upper = upper_insn & 0x7ff;
12947 lower = lower_insn & 0x7ff;
12949 addend = (upper << 12) | (lower << 1);
12950 addend += increment;
12953 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
12954 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
12956 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
12957 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
12963 contents = bfd_get_32 (abfd, address);
12965 /* Get the (signed) value from the instruction. */
12966 addend = contents & howto->src_mask;
12967 if (addend & ((howto->src_mask + 1) >> 1))
12969 bfd_signed_vma mask;
12972 mask &= ~ howto->src_mask;
12976 /* Add in the increment, (which is a byte value). */
12977 switch (howto->type)
12980 addend += increment;
12987 addend <<= howto->size;
12988 addend += increment;
12990 /* Should we check for overflow here ? */
12992 /* Drop any undesired bits. */
12993 addend >>= howto->rightshift;
12997 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
12999 bfd_put_32 (abfd, contents, address);
/* Nonzero if R_TYPE is any ARM TLS relocation, including the GNU
   TLS-descriptor dialect covered by IS_ARM_TLS_GNU_RELOC (expanded at
   use, so the later definition is fine).  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))
/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13025 /* Relocate an ARM ELF section.  Applies each entry of RELOCS to the
   CONTENTS of INPUT_SECTION, resolving local symbols via LOCAL_SYMS /
   LOCAL_SECTIONS and globals via the hash table, then reporting any
   relocation failures through the linker callbacks.  */
13028 elf32_arm_relocate_section (bfd * output_bfd,
13029 struct bfd_link_info * info,
13031 asection * input_section,
13032 bfd_byte * contents,
13033 Elf_Internal_Rela * relocs,
13034 Elf_Internal_Sym * local_syms,
13035 asection ** local_sections)
13037 Elf_Internal_Shdr *symtab_hdr;
13038 struct elf_link_hash_entry **sym_hashes;
13039 Elf_Internal_Rela *rel;
13040 Elf_Internal_Rela *relend;
13042 struct elf32_arm_link_hash_table * globals;
13044 globals = elf32_arm_hash_table (info);
13045 if (globals == NULL)
13048 symtab_hdr = & elf_symtab_hdr (input_bfd);
13049 sym_hashes = elf_sym_hashes (input_bfd);
/* Walk every relocation belonging to this input section.  */
13052 relend = relocs + input_section->reloc_count;
13053 for (; rel < relend; rel++)
13056 reloc_howto_type * howto;
13057 unsigned long r_symndx;
13058 Elf_Internal_Sym * sym;
13060 struct elf_link_hash_entry * h;
13061 bfd_vma relocation;
13062 bfd_reloc_status_type r;
13065 bfd_boolean unresolved_reloc = FALSE;
13066 char *error_message = NULL;
13068 r_symndx = ELF32_R_SYM (rel->r_info);
13069 r_type = ELF32_R_TYPE (rel->r_info);
/* Map legacy/vendor reloc numbers to the canonical type.  */
13070 r_type = arm_real_reloc_type (globals, r_type);
/* Vtable GC relocs carry no data to patch; nothing to do here.  */
13072 if ( r_type == R_ARM_GNU_VTENTRY
13073 || r_type == R_ARM_GNU_VTINHERIT)
13076 howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13079 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
/* Local symbol: index below sh_info refers into LOCAL_SYMS.  */
13085 if (r_symndx < symtab_hdr->sh_info)
13087 sym = local_syms + r_symndx;
13088 sym_type = ELF32_ST_TYPE (sym->st_info);
13089 sec = local_sections[r_symndx];
13091 /* An object file might have a reference to a local
13092 undefined symbol. This is a daft object file, but we
13093 should at least do something about it. V4BX & NONE
13094 relocations do not use the symbol and are explicitly
13095 allowed to use the undefined symbol, so allow those.
13096 Likewise for relocations against STN_UNDEF. */
13097 if (r_type != R_ARM_V4BX
13098 && r_type != R_ARM_NONE
13099 && r_symndx != STN_UNDEF
13100 && bfd_is_und_section (sec)
13101 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13102 (*info->callbacks->undefined_symbol)
13103 (info, bfd_elf_string_from_elf_section
13104 (input_bfd, symtab_hdr->sh_link, sym->st_name),
13105 input_bfd, input_section,
13106 rel->r_offset, TRUE);
/* REL-style objects keep the addend in the section contents, so
   SEC_MERGE adjustments must be read from / written back into the
   instruction encoding itself.  */
13108 if (globals->use_rel)
13110 relocation = (sec->output_section->vma
13111 + sec->output_offset
13113 if (!bfd_link_relocatable (info)
13114 && (sec->flags & SEC_MERGE)
13115 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13118 bfd_vma addend, value;
/* MOVW/MOVT split their 16-bit immediate across imm4:imm12;
   reassemble it, then sign-extend from 16 bits.  */
13122 case R_ARM_MOVW_ABS_NC:
13123 case R_ARM_MOVT_ABS:
13124 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13125 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13126 addend = (addend ^ 0x8000) - 0x8000;
/* Thumb-2 MOVW/MOVT: immediate is split as imm4:i:imm3:imm8
   across the two halfwords.  */
13129 case R_ARM_THM_MOVW_ABS_NC:
13130 case R_ARM_THM_MOVT_ABS:
13131 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13133 value |= bfd_get_16 (input_bfd,
13134 contents + rel->r_offset + 2);
13135 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13136 | ((value & 0x04000000) >> 15);
13137 addend = (addend ^ 0x8000) - 0x8000;
/* Only relocs with a contiguous, unshifted addend field can be
   adjusted against a SEC_MERGE section in REL mode.  */
13141 if (howto->rightshift
13142 || (howto->src_mask & (howto->src_mask + 1)))
13145 /* xgettext:c-format */
13146 (_("%pB(%pA+%#" PRIx64 "): "
13147 "%s relocation against SEC_MERGE section"),
13148 input_bfd, input_section,
13149 (uint64_t) rel->r_offset, howto->name);
13153 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13155 /* Get the (signed) value from the instruction. */
13156 addend = value & howto->src_mask;
13157 if (addend & ((howto->src_mask + 1) >> 1))
13159 bfd_signed_vma mask;
13162 mask &= ~ howto->src_mask;
/* Resolve the addend through the merged-section mapping, then
   rebase it on the output placement of the merge section.  */
13170 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13172 addend += msec->output_section->vma + msec->output_offset;
13174 /* Cases here must match those in the preceding
13175 switch statement. */
13178 case R_ARM_MOVW_ABS_NC:
13179 case R_ARM_MOVT_ABS:
13180 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13181 | (addend & 0xfff);
13182 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13185 case R_ARM_THM_MOVW_ABS_NC:
13186 case R_ARM_THM_MOVT_ABS:
13187 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13188 | (addend & 0xff) | ((addend & 0x0800) << 15);
13189 bfd_put_16 (input_bfd, value >> 16,
13190 contents + rel->r_offset);
13191 bfd_put_16 (input_bfd, value,
13192 contents + rel->r_offset + 2);
13196 value = (value & ~ howto->dst_mask)
13197 | (addend & howto->dst_mask);
13198 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
/* RELA-style: the generic helper resolves local syms directly.  */
13204 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13208 bfd_boolean warned, ignored;
/* Global symbol: standard ELF resolution via the hash table.  */
13210 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13211 r_symndx, symtab_hdr, sym_hashes,
13212 h, sec, relocation,
13213 unresolved_reloc, warned, ignored);
13215 sym_type = h->type;
13218 if (sec != NULL && discarded_section (sec))
13219 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13220 rel, 1, relend, howto, 0, contents);
13222 if (bfd_link_relocatable (info))
13224 /* This is a relocatable link. We don't have to change
13225 anything, unless the reloc is against a section symbol,
13226 in which case we have to adjust according to where the
13227 section symbol winds up in the output section. */
13228 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13230 if (globals->use_rel)
13231 arm_add_to_rel (input_bfd, contents + rel->r_offset,
13232 howto, (bfd_signed_vma) sec->output_offset);
13234 rel->r_addend += sec->output_offset;
13240 name = h->root.root.string;
13243 name = (bfd_elf_string_from_elf_section
13244 (input_bfd, symtab_hdr->sh_link, sym->st_name));
13245 if (name == NULL || *name == '\0')
13246 name = bfd_section_name (sec);
/* Diagnose TLS relocs used on non-TLS symbols and vice versa.  */
13249 if (r_symndx != STN_UNDEF
13250 && r_type != R_ARM_NONE
13252 || h->root.type == bfd_link_hash_defined
13253 || h->root.type == bfd_link_hash_defweak)
13254 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13257 ((sym_type == STT_TLS
13258 /* xgettext:c-format */
13259 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13260 /* xgettext:c-format */
13261 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13264 (uint64_t) rel->r_offset,
13269 /* We call elf32_arm_final_link_relocate unless we're completely
13270 done, i.e., the relaxation produced the final output we want,
13271 and we won't let anybody mess with it. Also, we have to do
13272 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13273 both in relaxed and non-relaxed cases. */
13274 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13275 || (IS_ARM_TLS_GNU_RELOC (r_type)
13276 && !((h ? elf32_arm_hash_entry (h)->tls_type :
13277 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13280 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13281 contents, rel, h == NULL);
13282 /* This may have been marked unresolved because it came from
13283 a shared library. But we've just dealt with that. */
13284 unresolved_reloc = 0;
13287 r = bfd_reloc_continue;
13289 if (r == bfd_reloc_continue)
13291 unsigned char branch_type =
13292 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13293 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13295 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13296 input_section, contents, rel,
13297 relocation, info, sec, name,
13298 sym_type, branch_type, h,
13303 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13304 because such sections are not SEC_ALLOC and thus ld.so will
13305 not process them. */
13306 if (unresolved_reloc
13307 && !((input_section->flags & SEC_DEBUGGING) != 0
13309 && _bfd_elf_section_offset (output_bfd, info, input_section,
13310 rel->r_offset) != (bfd_vma) -1)
13313 /* xgettext:c-format */
13314 (_("%pB(%pA+%#" PRIx64 "): "
13315 "unresolvable %s relocation against symbol `%s'"),
13318 (uint64_t) rel->r_offset,
13320 h->root.root.string);
/* Report relocation failures via the appropriate callback.  */
13324 if (r != bfd_reloc_ok)
13328 case bfd_reloc_overflow:
13329 /* If the overflowing reloc was to an undefined symbol,
13330 we have already printed one error message and there
13331 is no point complaining again. */
13332 if (!h || h->root.type != bfd_link_hash_undefined)
13333 (*info->callbacks->reloc_overflow)
13334 (info, (h ? &h->root : NULL), name, howto->name,
13335 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13338 case bfd_reloc_undefined:
13339 (*info->callbacks->undefined_symbol)
13340 (info, name, input_bfd, input_section, rel->r_offset, TRUE);
13343 case bfd_reloc_outofrange:
13344 error_message = _("out of range");
13347 case bfd_reloc_notsupported:
13348 error_message = _("unsupported relocation");
13351 case bfd_reloc_dangerous:
13352 /* error_message should already be set. */
13356 error_message = _("unknown error");
13357 /* Fall through. */
13360 BFD_ASSERT (error_message != NULL);
13361 (*info->callbacks->reloc_dangerous)
13362 (info, error_message, input_bfd, input_section, rel->r_offset);
13371 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13372 adds the edit to the start of the list. (The list must be built in order of
13373 ascending TINDEX: the function's callers are primarily responsible for
13374 maintaining that condition). */
13377 add_unwind_table_edit (arm_unwind_table_edit **head,
13378 arm_unwind_table_edit **tail,
13379 arm_unwind_edit_type type,
13380 asection *linked_section,
13381 unsigned int tindex)
/* Allocated with xmalloc, so allocation failure aborts rather than
   returning NULL; the edit lists live until the tables are written.  */
13383 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13384 xmalloc (sizeof (arm_unwind_table_edit));
13386 new_edit->type = type;
13387 new_edit->linked_section = linked_section;
13388 new_edit->index = tindex;
/* Nonzero TINDEX: append at the tail (or start a fresh list).  */
13392 new_edit->next = NULL;
13395 (*tail)->next = new_edit;
13397 (*tail) = new_edit;
13400 (*head) = new_edit;
/* TINDEX == 0: push onto the front of the list.  */
13404 new_edit->next = *head;
13413 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13415 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.
   The first adjustment records the original size in rawsize.  The size of
   the output section is adjusted by the same amount.  */
13417 adjust_exidx_size(asection *exidx_sec, int adjust)
13421 if (!exidx_sec->rawsize)
13422 exidx_sec->rawsize = exidx_sec->size;
13424 bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13425 out_sec = exidx_sec->output_section;
13426 /* Adjust size of output section. */
13427 bfd_set_section_size (out_sec, out_sec->size +adjust);
13430 /* Insert an EXIDX_CANTUNWIND marker at the end of a section.  Records an
   INSERT_EXIDX_CANTUNWIND_AT_END edit against TEXT_SEC (applied later in
   elf32_arm_write_section), bumps the extra-reloc count and grows
   EXIDX_SEC by one 8-byte table entry.  */
13432 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13434 struct _arm_elf_section_data *exidx_arm_data;
13436 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13437 add_unwind_table_edit (
13438 &exidx_arm_data->u.exidx.unwind_edit_list,
13439 &exidx_arm_data->u.exidx.unwind_edit_tail,
13440 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13442 exidx_arm_data->additional_reloc_count++;
/* Each .ARM.exidx entry is two 32-bit words.  */
13444 adjust_exidx_size(exidx_sec, 8);
13447 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13448 made to those tables, such that:
13450 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13451 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13452 codes which have been inlined into the index).
13454 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13456 The edits are applied when the tables are written
13457 (in elf32_arm_write_section). */
13460 elf32_arm_fix_exidx_coverage (asection **text_section_order,
13461 unsigned int num_text_sections,
13462 struct bfd_link_info *info,
13463 bfd_boolean merge_exidx_entries)
13466 unsigned int last_second_word = 0, i;
13467 asection *last_exidx_sec = NULL;
13468 asection *last_text_sec = NULL;
/* -1 = no entry seen yet, 0 = CANTUNWIND, 1 = inlined, 2 = table entry.
   NOTE(review): value meanings inferred from the comparisons below —
   confirm against the elided assignments.  */
13469 int last_unwind_type = -1;
13471 /* Walk over all EXIDX sections, and create backlinks from the corresponding
13473 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13477 for (sec = inp->sections; sec != NULL; sec = sec->next)
13479 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13480 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13482 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13485 if (elf_sec->linked_to)
13487 Elf_Internal_Shdr *linked_hdr
13488 = &elf_section_data (elf_sec->linked_to)->this_hdr;
13489 struct _arm_elf_section_data *linked_sec_arm_data
13490 = get_arm_elf_section_data (linked_hdr->bfd_section);
13492 if (linked_sec_arm_data == NULL)
13495 /* Link this .ARM.exidx section back from the text section it
13497 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13502 /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
13503 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13504 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
13506 for (i = 0; i < num_text_sections; i++)
13508 asection *sec = text_section_order[i];
13509 asection *exidx_sec;
13510 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13511 struct _arm_elf_section_data *exidx_arm_data;
13512 bfd_byte *contents = NULL;
13513 int deleted_exidx_bytes = 0;
13515 arm_unwind_table_edit *unwind_edit_head = NULL;
13516 arm_unwind_table_edit *unwind_edit_tail = NULL;
13517 Elf_Internal_Shdr *hdr;
13520 if (arm_data == NULL)
13523 exidx_sec = arm_data->u.text.arm_exidx_sec;
13524 if (exidx_sec == NULL)
13526 /* Section has no unwind data. */
13527 if (last_unwind_type == 0 || !last_exidx_sec)
13530 /* Ignore zero sized sections. */
13531 if (sec->size == 0)
13534 insert_cantunwind_after(last_text_sec, last_exidx_sec);
13535 last_unwind_type = 0;
13539 /* Skip /DISCARD/ sections. */
13540 if (bfd_is_abs_section (exidx_sec->output_section))
13543 hdr = &elf_section_data (exidx_sec)->this_hdr;
13544 if (hdr->sh_type != SHT_ARM_EXIDX)
13547 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13548 if (exidx_arm_data == NULL)
13551 ibfd = exidx_sec->owner;
13553 if (hdr->contents != NULL)
13554 contents = hdr->contents;
13555 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
/* If this table does not start exactly at the section start, the gap
   after the previous section needs a CANTUNWIND terminator.  */
13559 if (last_unwind_type > 0)
13561 unsigned int first_word = bfd_get_32 (ibfd, contents);
13562 /* Add cantunwind if first unwind item does not match section
13564 if (first_word != sec->vma)
13566 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13567 last_unwind_type = 0;
/* Scan the table one 8-byte entry at a time; the second word
   classifies the entry.  */
13571 for (j = 0; j < hdr->sh_size; j += 8)
13573 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13577 /* An EXIDX_CANTUNWIND entry. */
13578 if (second_word == 1)
13580 if (last_unwind_type == 0)
13584 /* Inlined unwinding data. Merge if equal to previous. */
13585 else if ((second_word & 0x80000000) != 0)
13587 if (merge_exidx_entries
13588 && last_second_word == second_word && last_unwind_type == 1)
13591 last_second_word = second_word;
13593 /* Normal table entry. In theory we could merge these too,
13594 but duplicate entries are likely to be much less common. */
13598 if (elide && !bfd_link_relocatable (info))
13600 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13601 DELETE_EXIDX_ENTRY, NULL, j / 8);
13603 deleted_exidx_bytes += 8;
13606 last_unwind_type = unwind_type;
13609 /* Free contents if we allocated it ourselves. */
13610 if (contents != hdr->contents)
13613 /* Record edits to be applied later (in elf32_arm_write_section). */
13614 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13615 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13617 if (deleted_exidx_bytes > 0)
13618 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
13620 last_exidx_sec = exidx_sec;
13621 last_text_sec = sec;
13624 /* Add terminating CANTUNWIND entry. */
13625 if (!bfd_link_relocatable (info) && last_exidx_sec
13626 && last_unwind_type != 0)
13627 insert_cantunwind_after(last_text_sec, last_exidx_sec);
/* Apply any recorded edits to the glue section NAME owned by IBFD and
   copy its contents into the output bfd OBFD.  A missing or excluded
   section is not an error.  */
13633 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13634 bfd *ibfd, const char *name)
13636 asection *sec, *osec;
13638 sec = bfd_get_linker_section (ibfd, name);
13639 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13642 osec = sec->output_section;
/* elf32_arm_write_section applies BE8 mapping / recorded edits.  */
13643 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13646 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13647 sec->output_offset, sec->size))
/* ARM-specific final link: run the generic ELF final link, then
   post-process stub sections and emit the interworking / erratum-veneer
   glue sections owned by the glue bfd.  */
13654 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13656 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13657 asection *sec, *osec;
13659 if (globals == NULL)
13662 /* Invoke the regular ELF backend linker to do all the work. */
13663 if (!bfd_elf_final_link (abfd, info))
13666 /* Process stub sections (eg BE8 encoding, ...). */
13667 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13669 for (i=0; i<htab->top_id; i++)
13671 sec = htab->stub_group[i].stub_sec;
13672 /* Only process it once, in its link_sec slot. */
13673 if (sec && i == htab->stub_group[i].link_sec->id)
13675 osec = sec->output_section;
13676 elf32_arm_write_section (abfd, info, sec, sec->contents);
13677 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13678 sec->output_offset, sec->size))
13683 /* Write out any glue sections now that we have created all the
/* Each helper call below copies one named glue section; failure of
   any copy fails the link.  */
13685 if (globals->bfd_of_glue_owner != NULL)
13687 if (! elf32_arm_output_glue_section (info, abfd,
13688 globals->bfd_of_glue_owner,
13689 ARM2THUMB_GLUE_SECTION_NAME))
13692 if (! elf32_arm_output_glue_section (info, abfd,
13693 globals->bfd_of_glue_owner,
13694 THUMB2ARM_GLUE_SECTION_NAME))
13697 if (! elf32_arm_output_glue_section (info, abfd,
13698 globals->bfd_of_glue_owner,
13699 VFP11_ERRATUM_VENEER_SECTION_NAME))
13702 if (! elf32_arm_output_glue_section (info, abfd,
13703 globals->bfd_of_glue_owner,
13704 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13707 if (! elf32_arm_output_glue_section (info, abfd,
13708 globals->bfd_of_glue_owner,
13709 ARM_BX_GLUE_SECTION_NAME))
13716 /* Return a best guess for the machine number based on the attributes.
   Maps the EABI Tag_CPU_arch build attribute to a bfd_mach_arm_* value,
   consulting Tag_CPU_name / Tag_WMMX_arch to separate the V5TE
   derivatives (XScale, iWMMXt, iWMMXt2).  */
13718 static unsigned int
13719 bfd_arm_get_mach_from_attributes (bfd * abfd)
13721 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13725 case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13726 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13727 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13728 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13730 case TAG_CPU_ARCH_V5TE:
/* V5TE covers several distinct machines; disambiguate via the
   CPU name attribute first, then the WMMX architecture tag.  */
13734 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13735 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13739 if (strcmp (name, "IWMMXT2") == 0)
13740 return bfd_mach_arm_iWMMXt2;
13742 if (strcmp (name, "IWMMXT") == 0)
13743 return bfd_mach_arm_iWMMXt;
13745 if (strcmp (name, "XSCALE") == 0)
13749 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13750 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13753 case 1: return bfd_mach_arm_iWMMXt;
13754 case 2: return bfd_mach_arm_iWMMXt2;
13755 default: return bfd_mach_arm_XScale;
13760 return bfd_mach_arm_5TE;
13763 case TAG_CPU_ARCH_V5TEJ:
13764 return bfd_mach_arm_5TEJ;
13765 case TAG_CPU_ARCH_V6:
13766 return bfd_mach_arm_6;
13767 case TAG_CPU_ARCH_V6KZ:
13768 return bfd_mach_arm_6KZ;
13769 case TAG_CPU_ARCH_V6T2:
13770 return bfd_mach_arm_6T2;
13771 case TAG_CPU_ARCH_V6K:
13772 return bfd_mach_arm_6K;
13773 case TAG_CPU_ARCH_V7:
13774 return bfd_mach_arm_7;
13775 case TAG_CPU_ARCH_V6_M:
13776 return bfd_mach_arm_6M;
13777 case TAG_CPU_ARCH_V6S_M:
13778 return bfd_mach_arm_6SM;
13779 case TAG_CPU_ARCH_V7E_M:
13780 return bfd_mach_arm_7EM;
13781 case TAG_CPU_ARCH_V8:
13782 return bfd_mach_arm_8;
13783 case TAG_CPU_ARCH_V8R:
13784 return bfd_mach_arm_8R;
13785 case TAG_CPU_ARCH_V8M_BASE:
13786 return bfd_mach_arm_8M_BASE;
13787 case TAG_CPU_ARCH_V8M_MAIN:
13788 return bfd_mach_arm_8M_MAIN;
13789 case TAG_CPU_ARCH_V8_1M_MAIN:
13790 return bfd_mach_arm_8_1M_MAIN;
13793 /* Force entry to be added for any new known Tag_CPU_arch value. */
13794 BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13796 /* Unknown Tag_CPU_arch value. */
13797 return bfd_mach_arm_unknown;
13801 /* Set the right machine number.  Prefer the ARM note section; fall back
   to the Maverick flag in the ELF header, then to the build attributes.  */
13804 elf32_arm_object_p (bfd *abfd)
13808 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13810 if (mach == bfd_mach_arm_unknown)
13812 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13813 mach = bfd_mach_arm_ep9312;
13815 mach = bfd_arm_get_mach_from_attributes (abfd);
13818 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13822 /* Function to keep ARM specific flags in the ELF header.  For legacy
   (EABI-unknown) objects, warns when a request would flip the
   interworking flag after it was already fixed.  */
13825 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13827 if (elf_flags_init (abfd)
13828 && elf_elfheader (abfd)->e_flags != flags)
13830 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13832 if (flags & EF_ARM_INTERWORK)
13834 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13838 (_("warning: clearing the interworking flag of %pB due to outside request"),
13844 elf_elfheader (abfd)->e_flags = flags;
13845 elf_flags_init (abfd) = TRUE;
13851 /* Copy backend specific data from one object module to another.
   Copies the e_flags from IBFD to OBFD, reconciling legacy
   (EABI-unknown) flag incompatibilities first, then delegates to the
   generic ELF copy.  */
13854 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13857 flagword out_flags;
13859 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13862 in_flags = elf_elfheader (ibfd)->e_flags;
13863 out_flags = elf_elfheader (obfd)->e_flags;
13865 if (elf_flags_init (obfd)
13866 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13867 && in_flags != out_flags)
13869 /* Cannot mix APCS26 and APCS32 code. */
13870 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13873 /* Cannot mix float APCS and non-float APCS code. */
13874 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13877 /* If the src and dest have different interworking flags
13878 then turn off the interworking bit. */
13879 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13881 if (out_flags & EF_ARM_INTERWORK)
13883 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13886 in_flags &= ~EF_ARM_INTERWORK;
13889 /* Likewise for PIC, though don't warn for this case. */
13890 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13891 in_flags &= ~EF_ARM_PIC;
13894 elf_elfheader (obfd)->e_flags = in_flags;
13895 elf_flags_init (obfd) = TRUE;
13897 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13900 /* Values for Tag_ABI_PCS_R9_use.  */
13909 /* Values for Tag_ABI_PCS_RW_data (how writable static data is
   addressed: absolutely, PC-relative, SB-relative, or not at all).  */
13912 AEABI_PCS_RW_data_absolute,
13913 AEABI_PCS_RW_data_PCrel,
13914 AEABI_PCS_RW_data_SBrel,
13915 AEABI_PCS_RW_data_unused
13918 /* Values for Tag_ABI_enum_size.  */
13924 AEABI_enum_forced_wide
13927 /* Determine whether an object attribute tag takes an integer, a
   string, or both; returns an ATTR_TYPE_FLAG_* bitmask for TAG.  */
13931 elf32_arm_obj_attrs_arg_type (int tag)
13933 if (tag == Tag_compatibility)
13934 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13935 else if (tag == Tag_nodefaults)
13936 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
13937 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
13938 return ATTR_TYPE_FLAG_STR_VAL;
13940 return ATTR_TYPE_FLAG_INT_VAL;
/* For high tags the EABI convention is: odd = string, even = int.  */
13942 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
13945 /* The ABI defines that Tag_conformance should be emitted first, and that
13946 Tag_nodefaults should be second (if either is defined). This sets those
13947 two positions, and bumps up the position of all the remaining tags to
   compensate, returning the tag to emit at position NUM.  */
13950 elf32_arm_obj_attrs_order (int num)
13952 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
13953 return Tag_conformance;
13954 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
13955 return Tag_nodefaults;
13956 if ((num - 2) < Tag_nodefaults)
13958 if ((num - 1) < Tag_conformance)
13963 /* Attribute numbers >=64 (mod 128) can be safely ignored; lower numbers
   are mandatory, so an unknown one is a hard error.  */
13965 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
13967 if ((tag & 127) < 64)
13970 (_("%pB: unknown mandatory EABI object attribute %d"),
13972 bfd_set_error (bfd_error_bad_value);
13978 (_("warning: %pB: unknown EABI object attribute %d"),
13984 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
13985 Returns -1 if no architecture could be read.  */
13988 get_secondary_compatible_arch (bfd *abfd)
13990 obj_attribute *attr =
13991 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13993 /* Note: the tag and its argument below are uleb128 values, though
13994 currently-defined values fit in one byte for each.  */
/* Expect exactly the encoding Tag_CPU_arch, <one-byte value>, NUL.  */
13996 && attr->s[0] == Tag_CPU_arch
13997 && (attr->s[1] & 128) != 128
13998 && attr->s[2] == 0)
14001 /* This tag is "safely ignorable", so don't complain if it looks funny.  */
14005 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14006 The tag is removed if ARCH is -1.  */
14009 set_secondary_compatible_arch (bfd *abfd, int arch)
14011 obj_attribute *attr =
14012 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14020 /* Note: the tag and its argument below are uleb128 values, though
14021 currently-defined values fit in one byte for each.  */
/* 3 bytes: tag, value, NUL terminator (matches the reader above).  */
14023 attr->s = (char *) bfd_alloc (abfd, 3);
14024 attr->s[0] = Tag_CPU_arch;
14029 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  OLDTAG is the current output value, NEWTAG the input's;
   SECONDARY_COMPAT / *SECONDARY_COMPAT_OUT carry Tag_also_compatible_with.
   Returns the merged tag, or -1 (with an error issued against IBFD) when
   the architectures conflict.  Each per-architecture table below maps
   "the other tag" to the merged result, indexed by Tag_CPU_arch value.  */
14033 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
14034 int newtag, int secondary_compat)
14036 #define T(X) TAG_CPU_ARCH_##X
14037 int tagl, tagh, result;
14040 T(V6T2), /* PRE_V4. */
14042 T(V6T2), /* V4T. */
14043 T(V6T2), /* V5T. */
14044 T(V6T2), /* V5TE. */
14045 T(V6T2), /* V5TEJ. */
14048 T(V6T2) /* V6T2. */
14052 T(V6K), /* PRE_V4. */
14056 T(V6K), /* V5TE. */
14057 T(V6K), /* V5TEJ. */
14059 T(V6KZ), /* V6KZ. */
14065 T(V7), /* PRE_V4. */
14070 T(V7), /* V5TEJ. */
14083 T(V6K), /* V5TE. */
14084 T(V6K), /* V5TEJ. */
14086 T(V6KZ), /* V6KZ. */
14090 T(V6_M) /* V6_M. */
14092 const int v6s_m[] =
14098 T(V6K), /* V5TE. */
14099 T(V6K), /* V5TEJ. */
14101 T(V6KZ), /* V6KZ. */
14105 T(V6S_M), /* V6_M. */
14106 T(V6S_M) /* V6S_M. */
14108 const int v7e_m[] =
14112 T(V7E_M), /* V4T. */
14113 T(V7E_M), /* V5T. */
14114 T(V7E_M), /* V5TE. */
14115 T(V7E_M), /* V5TEJ. */
14116 T(V7E_M), /* V6. */
14117 T(V7E_M), /* V6KZ. */
14118 T(V7E_M), /* V6T2. */
14119 T(V7E_M), /* V6K. */
14120 T(V7E_M), /* V7. */
14121 T(V7E_M), /* V6_M. */
14122 T(V7E_M), /* V6S_M. */
14123 T(V7E_M) /* V7E_M. */
14127 T(V8), /* PRE_V4. */
14132 T(V8), /* V5TEJ. */
14139 T(V8), /* V6S_M. */
14140 T(V8), /* V7E_M. */
14145 T(V8R), /* PRE_V4. */
14149 T(V8R), /* V5TE. */
14150 T(V8R), /* V5TEJ. */
14152 T(V8R), /* V6KZ. */
14153 T(V8R), /* V6T2. */
14156 T(V8R), /* V6_M. */
14157 T(V8R), /* V6S_M. */
14158 T(V8R), /* V7E_M. */
14162 const int v8m_baseline[] =
14175 T(V8M_BASE), /* V6_M. */
14176 T(V8M_BASE), /* V6S_M. */
14180 T(V8M_BASE) /* V8-M BASELINE. */
14182 const int v8m_mainline[] =
14194 T(V8M_MAIN), /* V7. */
14195 T(V8M_MAIN), /* V6_M. */
14196 T(V8M_MAIN), /* V6S_M. */
14197 T(V8M_MAIN), /* V7E_M. */
14200 T(V8M_MAIN), /* V8-M BASELINE. */
14201 T(V8M_MAIN) /* V8-M MAINLINE. */
14203 const int v8_1m_mainline[] =
14215 T(V8_1M_MAIN), /* V7. */
14216 T(V8_1M_MAIN), /* V6_M. */
14217 T(V8_1M_MAIN), /* V6S_M. */
14218 T(V8_1M_MAIN), /* V7E_M. */
14221 T(V8_1M_MAIN), /* V8-M BASELINE. */
14222 T(V8_1M_MAIN), /* V8-M MAINLINE. */
14223 -1, /* Unused (18). */
14224 -1, /* Unused (19). */
14225 -1, /* Unused (20). */
14226 T(V8_1M_MAIN) /* V8.1-M MAINLINE. */
14228 const int v4t_plus_v6_m[] =
14234 T(V5TE), /* V5TE. */
14235 T(V5TEJ), /* V5TEJ. */
14237 T(V6KZ), /* V6KZ. */
14238 T(V6T2), /* V6T2. */
14241 T(V6_M), /* V6_M. */
14242 T(V6S_M), /* V6S_M. */
14243 T(V7E_M), /* V7E_M. */
14246 T(V8M_BASE), /* V8-M BASELINE. */
14247 T(V8M_MAIN), /* V8-M MAINLINE. */
14248 -1, /* Unused (18). */
14249 -1, /* Unused (19). */
14250 -1, /* Unused (20). */
14251 T(V8_1M_MAIN), /* V8.1-M MAINLINE. */
14252 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
14254 const int *comb[] =
14270 /* Pseudo-architecture. */
14274 /* Check we've not got a higher architecture than we know about. */
14276 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14278 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14282 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14284 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14285 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14286 oldtag = T(V4T_PLUS_V6_M);
14288 /* And override the new tag if we have a Tag_also_compatible_with on the
14291 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14292 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14293 newtag = T(V4T_PLUS_V6_M);
14295 tagl = (oldtag < newtag) ? oldtag : newtag;
14296 result = tagh = (oldtag > newtag) ? oldtag : newtag;
14298 /* Architectures before V6KZ add features monotonically. */
14299 if (tagh <= TAG_CPU_ARCH_V6KZ)
/* Otherwise look up the merge table for the higher tag.  */
14302 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14304 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14305 as the canonical version. */
14306 if (result == T(V4T_PLUS_V6_M))
14309 *secondary_compat_out = T(V6_M);
14312 *secondary_compat_out = -1;
14316 _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14317 ibfd, oldtag, newtag);
14325 /* Query attributes object to see if integer divide instructions may be
14326 present in an object.  Decides from Tag_DIV_use, qualified by
   Tag_CPU_arch / Tag_CPU_arch_profile.  */
14328 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14330 int arch = attr[Tag_CPU_arch].i;
14331 int profile = attr[Tag_CPU_arch_profile].i;
14333 switch (attr[Tag_DIV_use].i)
14336 /* Integer divide allowed if instruction contained in architecture.  */
14337 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14339 else if (arch >= TAG_CPU_ARCH_V7E_M)
14345 /* Integer divide explicitly prohibited. */
14349 /* Unrecognised case - treat as allowing divide everywhere. */
14351 /* Integer divide allowed in ARM state. */
14356 /* Query attributes object to see if integer divide instructions are
14357 forbidden to be in the object. This is not the inverse of
14358 elf32_arm_attributes_accept_div.  (Tag_DIV_use == 1 means divide is
   explicitly prohibited; other values are merely "not guaranteed".)  */
14360 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14362 return attr[Tag_DIV_use].i == 1;
14365 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14366 are conflicting attributes. */
14369 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14371 bfd *obfd = info->output_bfd;
14372 obj_attribute *in_attr;
14373 obj_attribute *out_attr;
14374 /* Some tags have 0 = don't care, 1 = strong requirement,
14375 2 = weak requirement. */
14376 static const int order_021[3] = {0, 2, 1};
14378 bfd_boolean result = TRUE;
14379 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14381 /* Skip the linker stubs file. This preserves previous behavior
14382 of accepting unknown attributes in the first input file - but
14384 if (ibfd->flags & BFD_LINKER_CREATED)
14387 /* Skip any input that hasn't attribute section.
14388 This enables to link object files without attribute section with
14390 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14393 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14395 /* This is the first object. Copy the attributes. */
14396 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14398 out_attr = elf_known_obj_attributes_proc (obfd);
14400 /* Use the Tag_null value to indicate the attributes have been
14404 /* We do not output objects with Tag_MPextension_use_legacy - we move
14405 the attribute's value to Tag_MPextension_use. */
14406 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14408 if (out_attr[Tag_MPextension_use].i != 0
14409 && out_attr[Tag_MPextension_use_legacy].i
14410 != out_attr[Tag_MPextension_use].i)
14413 (_("Error: %pB has both the current and legacy "
14414 "Tag_MPextension_use attributes"), ibfd);
14418 out_attr[Tag_MPextension_use] =
14419 out_attr[Tag_MPextension_use_legacy];
14420 out_attr[Tag_MPextension_use_legacy].type = 0;
14421 out_attr[Tag_MPextension_use_legacy].i = 0;
14427 in_attr = elf_known_obj_attributes_proc (ibfd);
14428 out_attr = elf_known_obj_attributes_proc (obfd);
14429 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14430 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14432 /* Ignore mismatches if the object doesn't use floating point or is
14433 floating point ABI independent. */
14434 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14435 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14436 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14437 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14438 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14439 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14442 (_("error: %pB uses VFP register arguments, %pB does not"),
14443 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14444 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14449 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14451 /* Merge this attribute with existing attributes. */
14454 case Tag_CPU_raw_name:
14456 /* These are merged after Tag_CPU_arch. */
14459 case Tag_ABI_optimization_goals:
14460 case Tag_ABI_FP_optimization_goals:
14461 /* Use the first value seen. */
14466 int secondary_compat = -1, secondary_compat_out = -1;
14467 unsigned int saved_out_attr = out_attr[i].i;
14469 static const char *name_table[] =
14471 /* These aren't real CPU names, but we can't guess
14472 that from the architecture version alone. */
14488 "ARM v8-M.baseline",
14489 "ARM v8-M.mainline",
14492 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14493 secondary_compat = get_secondary_compatible_arch (ibfd);
14494 secondary_compat_out = get_secondary_compatible_arch (obfd);
14495 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14496 &secondary_compat_out,
14500 /* Return with error if failed to merge. */
14501 if (arch_attr == -1)
14504 out_attr[i].i = arch_attr;
14506 set_secondary_compatible_arch (obfd, secondary_compat_out);
14508 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14509 if (out_attr[i].i == saved_out_attr)
14510 ; /* Leave the names alone. */
14511 else if (out_attr[i].i == in_attr[i].i)
14513 /* The output architecture has been changed to match the
14514 input architecture. Use the input names. */
14515 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14516 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14518 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14519 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14524 out_attr[Tag_CPU_name].s = NULL;
14525 out_attr[Tag_CPU_raw_name].s = NULL;
14528 /* If we still don't have a value for Tag_CPU_name,
14529 make one up now. Tag_CPU_raw_name remains blank. */
14530 if (out_attr[Tag_CPU_name].s == NULL
14531 && out_attr[i].i < ARRAY_SIZE (name_table))
14532 out_attr[Tag_CPU_name].s =
14533 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14537 case Tag_ARM_ISA_use:
14538 case Tag_THUMB_ISA_use:
14539 case Tag_WMMX_arch:
14540 case Tag_Advanced_SIMD_arch:
14541 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14542 case Tag_ABI_FP_rounding:
14543 case Tag_ABI_FP_exceptions:
14544 case Tag_ABI_FP_user_exceptions:
14545 case Tag_ABI_FP_number_model:
14546 case Tag_FP_HP_extension:
14547 case Tag_CPU_unaligned_access:
14549 case Tag_MPextension_use:
14551 /* Use the largest value specified. */
14552 if (in_attr[i].i > out_attr[i].i)
14553 out_attr[i].i = in_attr[i].i;
14556 case Tag_ABI_align_preserved:
14557 case Tag_ABI_PCS_RO_data:
14558 /* Use the smallest value specified. */
14559 if (in_attr[i].i < out_attr[i].i)
14560 out_attr[i].i = in_attr[i].i;
14563 case Tag_ABI_align_needed:
14564 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14565 && (in_attr[Tag_ABI_align_preserved].i == 0
14566 || out_attr[Tag_ABI_align_preserved].i == 0))
14568 /* This error message should be enabled once all non-conformant
14569 binaries in the toolchain have had the attributes set
14572 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14576 /* Fall through. */
14577 case Tag_ABI_FP_denormal:
14578 case Tag_ABI_PCS_GOT_use:
14579 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14580 value if greater than 2 (for future-proofing). */
14581 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14582 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14583 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14584 out_attr[i].i = in_attr[i].i;
14587 case Tag_Virtualization_use:
14588 /* The virtualization tag effectively stores two bits of
14589 information: the intended use of TrustZone (in bit 0), and the
14590 intended use of Virtualization (in bit 1). */
14591 if (out_attr[i].i == 0)
14592 out_attr[i].i = in_attr[i].i;
14593 else if (in_attr[i].i != 0
14594 && in_attr[i].i != out_attr[i].i)
14596 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14601 (_("error: %pB: unable to merge virtualization attributes "
14609 case Tag_CPU_arch_profile:
14610 if (out_attr[i].i != in_attr[i].i)
14612 /* 0 will merge with anything.
14613 'A' and 'S' merge to 'A'.
14614 'R' and 'S' merge to 'R'.
14615 'M' and 'A|R|S' is an error. */
14616 if (out_attr[i].i == 0
14617 || (out_attr[i].i == 'S'
14618 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14619 out_attr[i].i = in_attr[i].i;
14620 else if (in_attr[i].i == 0
14621 || (in_attr[i].i == 'S'
14622 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14623 ; /* Do nothing. */
14627 (_("error: %pB: conflicting architecture profiles %c/%c"),
14629 in_attr[i].i ? in_attr[i].i : '0',
14630 out_attr[i].i ? out_attr[i].i : '0');
14636 case Tag_DSP_extension:
14637 /* No need to change output value if any of:
14638 - pre (<=) ARMv5T input architecture (do not have DSP)
14639 - M input profile not ARMv7E-M and do not have DSP. */
14640 if (in_attr[Tag_CPU_arch].i <= 3
14641 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14642 && in_attr[Tag_CPU_arch].i != 13
14643 && in_attr[i].i == 0))
14644 ; /* Do nothing. */
14645 /* Output value should be 0 if DSP part of architecture, ie.
14646 - post (>=) ARMv5te architecture output
14647 - A, R or S profile output or ARMv7E-M output architecture. */
14648 else if (out_attr[Tag_CPU_arch].i >= 4
14649 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14650 || out_attr[Tag_CPU_arch_profile].i == 'R'
14651 || out_attr[Tag_CPU_arch_profile].i == 'S'
14652 || out_attr[Tag_CPU_arch].i == 13))
14654 /* Otherwise, DSP instructions are added and not part of output
14662 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14663 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14664 when it's 0. It might mean absence of FP hardware if
14665 Tag_FP_arch is zero. */
14667 #define VFP_VERSION_COUNT 9
14668 static const struct
14672 } vfp_versions[VFP_VERSION_COUNT] =
14688 /* If the output has no requirement about FP hardware,
14689 follow the requirement of the input. */
14690 if (out_attr[i].i == 0)
14692 /* This assert is still reasonable, we shouldn't
14693 produce the suspicious build attribute
14694 combination (See below for in_attr). */
14695 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14696 out_attr[i].i = in_attr[i].i;
14697 out_attr[Tag_ABI_HardFP_use].i
14698 = in_attr[Tag_ABI_HardFP_use].i;
14701 /* If the input has no requirement about FP hardware, do
14703 else if (in_attr[i].i == 0)
14705 /* We used to assert that Tag_ABI_HardFP_use was
14706 zero here, but we should never assert when
14707 consuming an object file that has suspicious
14708 build attributes. The single precision variant
14709 of 'no FP architecture' is still 'no FP
14710 architecture', so we just ignore the tag in this
14715 /* Both the input and the output have nonzero Tag_FP_arch.
14716 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14718 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14720 if (in_attr[Tag_ABI_HardFP_use].i == 0
14721 && out_attr[Tag_ABI_HardFP_use].i == 0)
14723 /* If the input and the output have different Tag_ABI_HardFP_use,
14724 the combination of them is 0 (implied by Tag_FP_arch). */
14725 else if (in_attr[Tag_ABI_HardFP_use].i
14726 != out_attr[Tag_ABI_HardFP_use].i)
14727 out_attr[Tag_ABI_HardFP_use].i = 0;
14729 /* Now we can handle Tag_FP_arch. */
14731 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14732 pick the biggest. */
14733 if (in_attr[i].i >= VFP_VERSION_COUNT
14734 && in_attr[i].i > out_attr[i].i)
14736 out_attr[i] = in_attr[i];
14739 /* The output uses the superset of input features
14740 (ISA version) and registers. */
14741 ver = vfp_versions[in_attr[i].i].ver;
14742 if (ver < vfp_versions[out_attr[i].i].ver)
14743 ver = vfp_versions[out_attr[i].i].ver;
14744 regs = vfp_versions[in_attr[i].i].regs;
14745 if (regs < vfp_versions[out_attr[i].i].regs)
14746 regs = vfp_versions[out_attr[i].i].regs;
14747 /* This assumes all possible supersets are also a valid
14749 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14751 if (regs == vfp_versions[newval].regs
14752 && ver == vfp_versions[newval].ver)
14755 out_attr[i].i = newval;
14758 case Tag_PCS_config:
14759 if (out_attr[i].i == 0)
14760 out_attr[i].i = in_attr[i].i;
14761 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14763 /* It's sometimes ok to mix different configs, so this is only
14766 (_("warning: %pB: conflicting platform configuration"), ibfd);
14769 case Tag_ABI_PCS_R9_use:
14770 if (in_attr[i].i != out_attr[i].i
14771 && out_attr[i].i != AEABI_R9_unused
14772 && in_attr[i].i != AEABI_R9_unused)
14775 (_("error: %pB: conflicting use of R9"), ibfd);
14778 if (out_attr[i].i == AEABI_R9_unused)
14779 out_attr[i].i = in_attr[i].i;
14781 case Tag_ABI_PCS_RW_data:
14782 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14783 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14784 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14787 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14791 /* Use the smallest value specified. */
14792 if (in_attr[i].i < out_attr[i].i)
14793 out_attr[i].i = in_attr[i].i;
14795 case Tag_ABI_PCS_wchar_t:
14796 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14797 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14800 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14801 ibfd, in_attr[i].i, out_attr[i].i);
14803 else if (in_attr[i].i && !out_attr[i].i)
14804 out_attr[i].i = in_attr[i].i;
14806 case Tag_ABI_enum_size:
14807 if (in_attr[i].i != AEABI_enum_unused)
14809 if (out_attr[i].i == AEABI_enum_unused
14810 || out_attr[i].i == AEABI_enum_forced_wide)
14812 /* The existing object is compatible with anything.
14813 Use whatever requirements the new object has. */
14814 out_attr[i].i = in_attr[i].i;
14816 else if (in_attr[i].i != AEABI_enum_forced_wide
14817 && out_attr[i].i != in_attr[i].i
14818 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14820 static const char *aeabi_enum_names[] =
14821 { "", "variable-size", "32-bit", "" };
14822 const char *in_name =
14823 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14824 ? aeabi_enum_names[in_attr[i].i]
14826 const char *out_name =
14827 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14828 ? aeabi_enum_names[out_attr[i].i]
14831 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14832 ibfd, in_name, out_name);
14836 case Tag_ABI_VFP_args:
14839 case Tag_ABI_WMMX_args:
14840 if (in_attr[i].i != out_attr[i].i)
14843 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14848 case Tag_compatibility:
14849 /* Merged in target-independent code. */
14851 case Tag_ABI_HardFP_use:
14852 /* This is handled along with Tag_FP_arch. */
14854 case Tag_ABI_FP_16bit_format:
14855 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14857 if (in_attr[i].i != out_attr[i].i)
14860 (_("error: fp16 format mismatch between %pB and %pB"),
14865 if (in_attr[i].i != 0)
14866 out_attr[i].i = in_attr[i].i;
14870 /* A value of zero on input means that the divide instruction may
14871 be used if available in the base architecture as specified via
14872 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14873 the user did not want divide instructions. A value of 2
14874 explicitly means that divide instructions were allowed in ARM
14875 and Thumb state. */
14876 if (in_attr[i].i == out_attr[i].i)
14877 /* Do nothing. */ ;
14878 else if (elf32_arm_attributes_forbid_div (in_attr)
14879 && !elf32_arm_attributes_accept_div (out_attr))
14881 else if (elf32_arm_attributes_forbid_div (out_attr)
14882 && elf32_arm_attributes_accept_div (in_attr))
14883 out_attr[i].i = in_attr[i].i;
14884 else if (in_attr[i].i == 2)
14885 out_attr[i].i = in_attr[i].i;
14888 case Tag_MPextension_use_legacy:
14889 /* We don't output objects with Tag_MPextension_use_legacy - we
14890 move the value to Tag_MPextension_use. */
14891 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14893 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14896 (_("%pB has both the current and legacy "
14897 "Tag_MPextension_use attributes"),
14903 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
14904 out_attr[Tag_MPextension_use] = in_attr[i];
14908 case Tag_nodefaults:
14909 /* This tag is set if it exists, but the value is unused (and is
14910 typically zero). We don't actually need to do anything here -
14911 the merge happens automatically when the type flags are merged
14914 case Tag_also_compatible_with:
14915 /* Already done in Tag_CPU_arch. */
14917 case Tag_conformance:
14918 /* Keep the attribute if it matches. Throw it away otherwise.
14919 No attribute means no claim to conform. */
14920 if (!in_attr[i].s || !out_attr[i].s
14921 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
14922 out_attr[i].s = NULL;
14927 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
14930 /* If out_attr was copied from in_attr then it won't have a type yet. */
14931 if (in_attr[i].type && !out_attr[i].type)
14932 out_attr[i].type = in_attr[i].type;
14935 /* Merge Tag_compatibility attributes and any common GNU ones. */
14936 if (!_bfd_elf_merge_object_attributes (ibfd, info))
14939 /* Check for any attributes not known on ARM. */
14940 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
14946 /* Return TRUE if the two EABI versions are incompatible. */
14949 elf32_arm_versions_compatible (unsigned iver, unsigned over)
14951 /* v4 and v5 are the same spec before and after it was released,
14952 so allow mixing them. */
14953 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
14954 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
14957 return (iver == over);
14960 /* Merge backend specific data from an object file to the output
14961 object file when linking. */
14964 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
14966 /* Display the flags field. */
/* Decodes the ARM-specific e_flags bits of ABFD into human-readable
   text written to FILE (PTR is the FILE*, per the BFD
   print_private_bfd_data convention).  Each recognised bit is printed
   and then cleared from the working copy of the flags, so that any
   bits left over at the end can be reported as unrecognised.  */
14969 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
14971 FILE * file = (FILE *) ptr;
14972 unsigned long flags;
14974 BFD_ASSERT (abfd != NULL && ptr != NULL);
14976 /* Print normal ELF private data. */
14977 _bfd_elf_print_private_bfd_data (abfd, ptr);
14979 flags = elf_elfheader (abfd)->e_flags;
14980 /* Ignore init flag - it may not be set, despite the flags field
14981 containing valid data. */
14983 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
/* Dispatch on the EABI version encoded in the top byte of e_flags;
   each case decodes (and clears) the flag bits meaningful for that
   version.  */
14985 switch (EF_ARM_EABI_VERSION (flags))
14987 case EF_ARM_EABI_UNKNOWN:
14988 /* The following flag bits are GNU extensions and not part of the
14989 official ARM ELF extended ABI. Hence they are only decoded if
14990 the EABI version is not set. */
14991 if (flags & EF_ARM_INTERWORK)
14992 fprintf (file, _(" [interworking enabled]"));
14994 if (flags & EF_ARM_APCS_26)
14995 fprintf (file, " [APCS-26]");
/* NOTE(review): the APCS-32 print below appears to be the else-branch
   of the APCS-26 test (intervening line elided in this excerpt) —
   confirm against full source.  */
14997 fprintf (file, " [APCS-32]");
14999 if (flags & EF_ARM_VFP_FLOAT)
15000 fprintf (file, _(" [VFP float format]"));
15001 else if (flags & EF_ARM_MAVERICK_FLOAT)
15002 fprintf (file, _(" [Maverick float format]"));
15004 fprintf (file, _(" [FPA float format]"));
15006 if (flags & EF_ARM_APCS_FLOAT)
15007 fprintf (file, _(" [floats passed in float registers]"));
15009 if (flags & EF_ARM_PIC)
15010 fprintf (file, _(" [position independent]"));
15012 if (flags & EF_ARM_NEW_ABI)
15013 fprintf (file, _(" [new ABI]"));
15015 if (flags & EF_ARM_OLD_ABI)
15016 fprintf (file, _(" [old ABI]"));
15018 if (flags & EF_ARM_SOFT_FLOAT)
15019 fprintf (file, _(" [software FP]"));
/* Clear everything reported above so the leftover check below only
   sees genuinely unknown bits.  */
15021 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
15022 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
15023 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
15024 | EF_ARM_MAVERICK_FLOAT);
15027 case EF_ARM_EABI_VER1:
15028 fprintf (file, _(" [Version1 EABI]"));
15030 if (flags & EF_ARM_SYMSARESORTED)
15031 fprintf (file, _(" [sorted symbol table]"));
15033 fprintf (file, _(" [unsorted symbol table]"));
15035 flags &= ~ EF_ARM_SYMSARESORTED;
15038 case EF_ARM_EABI_VER2:
15039 fprintf (file, _(" [Version2 EABI]"));
15041 if (flags & EF_ARM_SYMSARESORTED)
15042 fprintf (file, _(" [sorted symbol table]"));
15044 fprintf (file, _(" [unsorted symbol table]"));
15046 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
15047 fprintf (file, _(" [dynamic symbols use segment index]"));
15049 if (flags & EF_ARM_MAPSYMSFIRST)
15050 fprintf (file, _(" [mapping symbols precede others]"));
15052 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
15053 | EF_ARM_MAPSYMSFIRST);
15056 case EF_ARM_EABI_VER3:
15057 fprintf (file, _(" [Version3 EABI]"));
15060 case EF_ARM_EABI_VER4:
15061 fprintf (file, _(" [Version4 EABI]"));
15064 case EF_ARM_EABI_VER5:
15065 fprintf (file, _(" [Version5 EABI]"));
15067 if (flags & EF_ARM_ABI_FLOAT_SOFT)
15068 fprintf (file, _(" [soft-float ABI]"));
15070 if (flags & EF_ARM_ABI_FLOAT_HARD)
15071 fprintf (file, _(" [hard-float ABI]"));
15073 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
/* BE8/LE8 byte-order flags are shared by the v4/v5 handling above
   (fall-through structure; some lines elided in this excerpt).  */
15076 if (flags & EF_ARM_BE8)
15077 fprintf (file, _(" [BE8]"));
15079 if (flags & EF_ARM_LE8)
15080 fprintf (file, _(" [LE8]"));
15082 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
/* Default case: version byte not recognised at all.  */
15086 fprintf (file, _(" <EABI version unrecognised>"));
/* Version-independent bits.  */
15090 flags &= ~ EF_ARM_EABIMASK;
15092 if (flags & EF_ARM_RELEXEC)
15093 fprintf (file, _(" [relocatable executable]"));
15095 if (flags & EF_ARM_PIC)
15096 fprintf (file, _(" [position independent]"));
15098 if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
15099 fprintf (file, _(" [FDPIC ABI supplement]"));
15101 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
/* Anything still set was not decoded by any branch above.  */
15104 fprintf (file, _(" <Unrecognised flag bits set>"));
15106 fputc ('\n', file);
/* Map an ELF symbol's ARM-specific st_info type to the type BFD should
   use.  STT_ARM_TFUNC (Thumb function) is passed through unchanged;
   STT_ARM_16BIT is passed through only for non-object, non-TLS symbols
   so that Thumb code can be told apart from Thumb-region data.
   NOTE(review): the fall-through/default tail of this function is
   elided in this excerpt — presumably it returns TYPE unchanged;
   confirm against full source.  */
15112 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15114 switch (ELF_ST_TYPE (elf_sym->st_info))
15116 case STT_ARM_TFUNC:
/* Thumb function: keep the ARM-specific type.  */
15117 return ELF_ST_TYPE (elf_sym->st_info);
15119 case STT_ARM_16BIT:
15120 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15121 This allows us to distinguish between data used by Thumb instructions
15122 and non-data (which is probably code) inside Thumb regions of an
15124 if (type != STT_OBJECT && type != STT_TLS)
15125 return ELF_ST_TYPE (elf_sym->st_info);
/* Garbage-collection mark hook.  R_ARM_GNU_VTINHERIT and
   R_ARM_GNU_VTENTRY relocations only describe the C++ vtable hierarchy
   and are handled specially here (case bodies elided in this excerpt —
   presumably they return NULL so the target is not marked; confirm
   against full source).  All other relocation types defer to the
   generic _bfd_elf_gc_mark_hook.  */
15136 elf32_arm_gc_mark_hook (asection *sec,
15137 struct bfd_link_info *info,
15138 Elf_Internal_Rela *rel,
15139 struct elf_link_hash_entry *h,
15140 Elf_Internal_Sym *sym)
15143 switch (ELF32_R_TYPE (rel->r_info))
15145 case R_ARM_GNU_VTINHERIT:
15146 case R_ARM_GNU_VTENTRY:
15150 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15153 /* Look through the relocs for a section during the first phase. */
/* Scans every relocation of SEC in ABFD and records what later sizing
   passes will need: GOT reference counts and per-symbol TLS access
   models, PLT/iPLT reference counts (including Thumb branch flavours),
   FDPIC function-descriptor counts, and the list of relocs that may
   have to be copied into the output as dynamic relocations.
   NOTE(review): many lines (braces, breaks, returns) are elided in
   this excerpt; error paths presumably return FALSE per BFD
   convention — confirm against full source.  */
15156 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15157 asection *sec, const Elf_Internal_Rela *relocs)
15159 Elf_Internal_Shdr *symtab_hdr;
15160 struct elf_link_hash_entry **sym_hashes;
15161 const Elf_Internal_Rela *rel;
15162 const Elf_Internal_Rela *rel_end;
15165 struct elf32_arm_link_hash_table *htab;
15166 bfd_boolean call_reloc_p;
15167 bfd_boolean may_become_dynamic_p;
15168 bfd_boolean may_need_local_target_p;
15169 unsigned long nsyms;
/* Nothing to record for a relocatable (-r) link.  */
15171 if (bfd_link_relocatable (info))
15174 BFD_ASSERT (is_arm_elf (abfd));
15176 htab = elf32_arm_hash_table (info);
15182 /* Create dynamic sections for relocatable executables so that we can
15183 copy relocations. */
15184 if (htab->root.is_relocatable_executable
15185 && ! htab->root.dynamic_sections_created)
15187 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15191 if (htab->root.dynobj == NULL)
15192 htab->root.dynobj = abfd;
15193 if (!create_ifunc_sections (info))
15196 dynobj = htab->root.dynobj;
15198 symtab_hdr = & elf_symtab_hdr (abfd);
15199 sym_hashes = elf_sym_hashes (abfd);
15200 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15202 rel_end = relocs + sec->reloc_count;
/* Main loop: classify each relocation in turn.  */
15203 for (rel = relocs; rel < rel_end; rel++)
15205 Elf_Internal_Sym *isym;
15206 struct elf_link_hash_entry *h;
15207 struct elf32_arm_link_hash_entry *eh;
15208 unsigned int r_symndx;
15211 r_symndx = ELF32_R_SYM (rel->r_info);
15212 r_type = ELF32_R_TYPE (rel->r_info);
/* Map target-vector specific reloc numbers to the canonical set.  */
15213 r_type = arm_real_reloc_type (htab, r_type);
15215 if (r_symndx >= nsyms
15216 /* PR 9934: It is possible to have relocations that do not
15217 refer to symbols, thus it is also possible to have an
15218 object file containing relocations but no symbol table. */
15219 && (r_symndx > STN_UNDEF || nsyms > 0))
15221 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
/* Resolve R_SYMNDX to either a local symbol (ISYM) or a global hash
   entry (H), following indirect/warning links for globals.  */
15230 if (r_symndx < symtab_hdr->sh_info)
15232 /* A local symbol. */
15233 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
15240 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15241 while (h->root.type == bfd_link_hash_indirect
15242 || h->root.type == bfd_link_hash_warning)
15243 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15247 eh = (struct elf32_arm_link_hash_entry *) h;
/* Per-reloc classification flags, refined by the switch below and
   acted upon after it.  */
15249 call_reloc_p = FALSE;
15250 may_become_dynamic_p = FALSE;
15251 may_need_local_target_p = FALSE;
15253 /* Could be done earlier, if h were already available. */
15254 r_type = elf32_arm_tls_transition (info, r_type, h);
/* FDPIC function-descriptor relocs: count references per local
   symbol (side tables) or per hash entry (globals).  */
15257 case R_ARM_GOTOFFFUNCDESC:
15261 if (!elf32_arm_allocate_local_sym_info (abfd))
15263 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15264 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15268 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15273 case R_ARM_GOTFUNCDESC:
15277 /* Such a relocation is not supposed to be generated
15278 by gcc on a static function. */
15279 /* Anyway if needed it could be handled. */
15284 eh->fdpic_cnts.gotfuncdesc_cnt++;
15289 case R_ARM_FUNCDESC:
15293 if (!elf32_arm_allocate_local_sym_info (abfd))
15295 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15296 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15300 eh->fdpic_cnts.funcdesc_cnt++;
/* GOT / TLS relocs: determine the TLS access model required and
   combine it with any model already recorded for the symbol.  */
15306 case R_ARM_GOT_PREL:
15307 case R_ARM_TLS_GD32:
15308 case R_ARM_TLS_GD32_FDPIC:
15309 case R_ARM_TLS_IE32:
15310 case R_ARM_TLS_IE32_FDPIC:
15311 case R_ARM_TLS_GOTDESC:
15312 case R_ARM_TLS_DESCSEQ:
15313 case R_ARM_THM_TLS_DESCSEQ:
15314 case R_ARM_TLS_CALL:
15315 case R_ARM_THM_TLS_CALL:
15316 /* This symbol requires a global offset table entry. */
15318 int tls_type, old_tls_type;
15322 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15323 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15325 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15326 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15328 case R_ARM_TLS_GOTDESC:
15329 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15330 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15331 tls_type = GOT_TLS_GDESC; break;
15333 default: tls_type = GOT_NORMAL; break;
/* Initial-exec access from a shared object forces DF_STATIC_TLS.  */
15336 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15337 info->flags |= DF_STATIC_TLS;
15342 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15346 /* This is a global offset table entry for a local symbol. */
15347 if (!elf32_arm_allocate_local_sym_info (abfd))
15349 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15350 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15353 /* If a variable is accessed with both tls methods, two
15354 slots may be created. */
15355 if (GOT_TLS_GD_ANY_P (old_tls_type)
15356 && GOT_TLS_GD_ANY_P (tls_type))
15357 tls_type |= old_tls_type;
15359 /* We will already have issued an error message if there
15360 is a TLS/non-TLS mismatch, based on the symbol
15361 type. So just combine any TLS types needed. */
15362 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15363 && tls_type != GOT_NORMAL)
15364 tls_type |= old_tls_type;
15366 /* If the symbol is accessed in both IE and GDESC
15367 method, we're able to relax. Turn off the GDESC flag,
15368 without messing up with any other kind of tls types
15369 that may be involved. */
15370 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15371 tls_type &= ~GOT_TLS_GDESC;
15373 if (old_tls_type != tls_type)
15376 elf32_arm_hash_entry (h)->tls_type = tls_type;
15378 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15381 /* Fall through. */
15383 case R_ARM_TLS_LDM32:
15384 case R_ARM_TLS_LDM32_FDPIC:
15385 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15386 htab->tls_ldm_got.refcount++;
15387 /* Fall through. */
15389 case R_ARM_GOTOFF32:
/* Any GOT-relative reloc requires the .got section to exist.  */
15391 if (htab->root.sgot == NULL
15392 && !create_got_section (htab->root.dynobj, info))
/* Branch relocs: may need a PLT entry and/or a local stub.  */
15401 case R_ARM_THM_CALL:
15402 case R_ARM_THM_JUMP24:
15403 case R_ARM_THM_JUMP19:
15404 call_reloc_p = TRUE;
15405 may_need_local_target_p = TRUE;
15409 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15410 ldr __GOTT_INDEX__ offsets. */
15411 if (htab->root.target_os != is_vxworks)
15413 may_need_local_target_p = TRUE;
15416 else goto jump_over;
15418 /* Fall through. */
/* Absolute MOVW/MOVT relocs cannot be used in shared objects.  */
15420 case R_ARM_MOVW_ABS_NC:
15421 case R_ARM_MOVT_ABS:
15422 case R_ARM_THM_MOVW_ABS_NC:
15423 case R_ARM_THM_MOVT_ABS:
15424 if (bfd_link_pic (info))
15427 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15428 abfd, elf32_arm_howto_table_1[r_type].name,
15429 (h) ? h->root.root.string : "a local symbol");
15430 bfd_set_error (bfd_error_bad_value);
15434 /* Fall through. */
15436 case R_ARM_ABS32_NOI:
15438 if (h != NULL && bfd_link_executable (info))
/* Keep the function's address unique even if a PLT entry is
   later created for it.  */
15440 h->pointer_equality_needed = 1;
15442 /* Fall through. */
15444 case R_ARM_REL32_NOI:
15445 case R_ARM_MOVW_PREL_NC:
15446 case R_ARM_MOVT_PREL:
15447 case R_ARM_THM_MOVW_PREL_NC:
15448 case R_ARM_THM_MOVT_PREL:
15450 /* Should the interworking branches be listed here? */
15451 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15453 && (sec->flags & SEC_ALLOC) != 0)
15456 && elf32_arm_howto_from_type (r_type)->pc_relative)
15458 /* In shared libraries and relocatable executables,
15459 we treat local relative references as calls;
15460 see the related SYMBOL_CALLS_LOCAL code in
15461 allocate_dynrelocs. */
15462 call_reloc_p = TRUE;
15463 may_need_local_target_p = TRUE;
15466 /* We are creating a shared library or relocatable
15467 executable, and this is a reloc against a global symbol,
15468 or a non-PC-relative reloc against a local symbol.
15469 We may need to copy the reloc into the output. */
15470 may_become_dynamic_p = TRUE;
15473 may_need_local_target_p = TRUE;
15476 /* This relocation describes the C++ object vtable hierarchy.
15477 Reconstruct it for later use during GC. */
15478 case R_ARM_GNU_VTINHERIT:
15479 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15483 /* This relocation describes which C++ vtable entries are actually
15484 used. Record for later use during GC. */
15485 case R_ARM_GNU_VTENTRY:
15486 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
/* Post-switch actions driven by the classification flags.  */
15494 /* We may need a .plt entry if the function this reloc
15495 refers to is in a different object, regardless of the
15496 symbol's type. We can't tell for sure yet, because
15497 something later might force the symbol local. */
15499 else if (may_need_local_target_p)
15500 /* If this reloc is in a read-only section, we might
15501 need a copy reloc. We can't check reliably at this
15502 stage whether the section is read-only, as input
15503 sections have not yet been mapped to output sections.
15504 Tentatively set the flag for now, and correct in
15505 adjust_dynamic_symbol. */
15506 h->non_got_ref = 1;
15509 if (may_need_local_target_p
15510 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15512 union gotplt_union *root_plt;
15513 struct arm_plt_info *arm_plt;
15514 struct arm_local_iplt_info *local_iplt;
15518 root_plt = &h->plt;
15519 arm_plt = &eh->plt;
/* Local STT_GNU_IFUNC symbols get their own iPLT records.  */
15523 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15524 if (local_iplt == NULL)
15526 root_plt = &local_iplt->root;
15527 arm_plt = &local_iplt->arm;
15530 /* If the symbol is a function that doesn't bind locally,
15531 this relocation will need a PLT entry. */
15532 if (root_plt->refcount != -1)
15533 root_plt->refcount += 1;
15536 arm_plt->noncall_refcount++;
15538 /* It's too early to use htab->use_blx here, so we have to
15539 record possible blx references separately from
15540 relocs that definitely need a thumb stub. */
15542 if (r_type == R_ARM_THM_CALL)
15543 arm_plt->maybe_thumb_refcount += 1;
15545 if (r_type == R_ARM_THM_JUMP24
15546 || r_type == R_ARM_THM_JUMP19)
15547 arm_plt->thumb_refcount += 1;
15550 if (may_become_dynamic_p)
15552 struct elf_dyn_relocs *p, **head;
15554 /* Create a reloc section in dynobj. */
15555 if (sreloc == NULL)
15557 sreloc = _bfd_elf_make_dynamic_reloc_section
15558 (sec, dynobj, 2, abfd, ! htab->use_rel);
15560 if (sreloc == NULL)
15564 /* If this is a global symbol, count the number of
15565 relocations we need for this symbol. */
15567 head = &h->dyn_relocs;
15570 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
/* Reuse the head record if it is for this section, otherwise
   allocate a fresh one.  */
15576 if (p == NULL || p->sec != sec)
15578 size_t amt = sizeof *p;
15580 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15590 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15593 if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15594 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15595 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15596 that will become rofixup. */
15597 /* This is due to the fact that we suppose all will become rofixup. */
/* NOTE(review): stray debug fprintf to stderr — it duplicates the
   _bfd_error_handler diagnostic below and bypasses the linker's
   normal error reporting; consider removing.  */
15598 fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15600 (_("FDPIC does not yet support %s relocation"
15601 " to become dynamic for executable"),
15602 elf32_arm_howto_table_1[r_type].name);
/* Rewrite the relocations of output section O after unwind-table
   editing, when O is an SHT_ARM_EXIDX section.  Relocs whose exidx
   entries were deleted (DELETE_EXIDX_ENTRY) are dropped and later
   relocs are re-biased by 8 bytes per deleted entry; when a
   CANTUNWIND terminator was appended to an input section
   (INSERT_EXIDX_CANTUNWIND_AT_END) an extra R_ARM_PREL31 reloc is
   synthesised for it.  RELDATA describes O's reloc section; its
   count, size and (now stale) hashes are updated at the end.  */
15612 elf32_arm_update_relocs (asection *o,
15613 struct bfd_elf_section_reloc_data *reldata)
15615 void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15616 void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15617 const struct elf_backend_data *bed;
15618 _arm_elf_section_data *eado;
15619 struct bfd_link_order *p;
15620 bfd_byte *erela_head, *erela;
15621 Elf_Internal_Rela *irela_head, *irela;
15622 Elf_Internal_Shdr *rel_hdr;
15624 unsigned int count;
15626 eado = get_arm_elf_section_data (o);
/* Only .ARM.exidx sections are edited; everything else is untouched.  */
15628 if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15632 bed = get_elf_backend_data (abfd);
15633 rel_hdr = reldata->hdr;
/* Pick REL or RELA swap routines to match the section's entry size.  */
15635 if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15637 swap_in = bed->s->swap_reloc_in;
15638 swap_out = bed->s->swap_reloc_out;
15640 else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15642 swap_in = bed->s->swap_reloca_in;
15643 swap_out = bed->s->swap_reloca_out;
/* ERELA walks the external (on-disk) relocs in place; IRELA collects
   the surviving internal relocs (+1 slot for a possible synthesised
   CANTUNWIND reloc).  */
15648 erela_head = rel_hdr->contents;
15649 irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15650 ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15652 erela = erela_head;
15653 irela = irela_head;
15656 for (p = o->map_head.link_order; p; p = p->next)
15658 if (p->type == bfd_section_reloc_link_order
15659 || p->type == bfd_symbol_reloc_link_order)
/* Linker-generated relocs are copied through unchanged.  */
15661 (*swap_in) (abfd, erela, irela);
15662 erela += rel_hdr->sh_entsize;
15666 else if (p->type == bfd_indirect_link_order)
15668 struct bfd_elf_section_reloc_data *input_reldata;
15669 arm_unwind_table_edit *edit_list, *edit_tail;
15670 _arm_elf_section_data *eadi;
15675 i = p->u.indirect.section;
15677 eadi = get_arm_elf_section_data (i);
15678 edit_list = eadi->u.exidx.unwind_edit_list;
15679 edit_tail = eadi->u.exidx.unwind_edit_tail;
15680 offset = i->output_offset;
15682 if (eadi->elf.rel.hdr &&
15683 eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15684 input_reldata = &eadi->elf.rel;
15685 else if (eadi->elf.rela.hdr &&
15686 eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15687 input_reldata = &eadi->elf.rela;
/* Input section has unwind edits: filter its relocs through the
   edit list.  Each exidx entry is 8 bytes, hence the /8 and *8.  */
15693 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15695 arm_unwind_table_edit *edit_node, *edit_next;
15697 bfd_vma reloc_index;
15699 (*swap_in) (abfd, erela, irela);
15700 reloc_index = (irela->r_offset - offset) / 8;
/* Find the last edit at or before this entry; BIAS accumulates
   the number of deleted entries seen so far (lines elided).  */
15703 edit_node = edit_list;
15704 for (edit_next = edit_list;
15705 edit_next && edit_next->index <= reloc_index;
15706 edit_next = edit_node->next)
15709 edit_node = edit_next;
/* Keep the reloc unless this exact entry was deleted; shift its
   offset back over the deleted entries.  */
15712 if (edit_node->type != DELETE_EXIDX_ENTRY
15713 || edit_node->index != reloc_index)
15715 irela->r_offset -= bias * 8;
15720 erela += rel_hdr->sh_entsize;
15723 if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15725 /* New relocation entity. */
15726 asection *text_sec = edit_tail->linked_section;
15727 asection *text_out = text_sec->output_section;
15728 bfd_vma exidx_offset = offset + i->size - 8;
15730 irela->r_addend = 0;
15731 irela->r_offset = exidx_offset;
15732 irela->r_info = ELF32_R_INFO
15733 (text_out->target_index, R_ARM_PREL31);
/* No edits for this input section: copy its relocs verbatim.  */
15740 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15742 (*swap_in) (abfd, erela, irela);
15743 erela += rel_hdr->sh_entsize;
15747 count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15752 reldata->count = count;
15753 rel_hdr->sh_size = count * rel_hdr->sh_entsize;
/* Write the edited internal relocs back over the external buffer.  */
15755 erela = erela_head;
15756 irela = irela_head;
15759 (*swap_out) (abfd, irela, erela);
15760 erela += rel_hdr->sh_entsize;
15767 /* Hashes are no longer valid. */
15768 free (reldata->hashes);
15769 reldata->hashes = NULL;
15772 /* Unwinding tables are not referenced directly. This pass marks them as
15773 required if the corresponding code section is marked. Similarly, ARMv8-M
15774 secure entry functions can only be referenced by SG veneers which are
15775 created after the GC process. They need to be marked in case they reside in
15776 their own section (as would be the case if code was compiled with
15777 -ffunction-sections). */
/* NOTE(review): this is a sampled listing -- the return-type line, braces
   and some statements between the numbered lines below are not visible in
   this excerpt.  Comments state only what the visible lines show.  */
15780 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15781 elf_gc_mark_hook_fn gc_mark_hook)
15784 Elf_Internal_Shdr **elf_shdrp;
15785 asection *cmse_sec;
15786 obj_attribute *out_attr;
15787 Elf_Internal_Shdr *symtab_hdr;
15788 unsigned i, sym_count, ext_start;
15789 const struct elf_backend_data *bed;
15790 struct elf_link_hash_entry **sym_hashes;
15791 struct elf32_arm_link_hash_entry *cmse_hash;
15792 bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
15793 bfd_boolean debug_sec_need_to_be_marked = FALSE;
/* Run the generic ELF extra-section marking first.  */
15796 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
/* Detect an M-profile v8-M (or later) output, which may contain secure
   entry (cmse) functions referenced only by SG veneers.  */
15798 out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15799 is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15800 && out_attr[Tag_CPU_arch_profile].i == 'M';
15802 /* Marking EH data may cause additional code sections to be marked,
15803 requiring multiple passes. */
15808 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15812 if (! is_arm_elf (sub))
15815 elf_shdrp = elf_elfsections (sub);
15816 for (o = sub->sections; o != NULL; o = o->next)
15818 Elf_Internal_Shdr *hdr;
/* A .ARM.exidx section is kept whenever the text section it describes
   (named by its sh_link) has been marked.  */
15820 hdr = &elf_section_data (o)->this_hdr;
15821 if (hdr->sh_type == SHT_ARM_EXIDX
15823 && hdr->sh_link < elf_numsections (sub)
15825 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15828 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15833 /* Mark section holding ARMv8-M secure entry functions. We mark all
15834 of them so no need for a second browsing. */
15835 if (is_v8m && first_bfd_browse)
15837 sym_hashes = elf_sym_hashes (sub);
15838 bed = get_elf_backend_data (sub);
15839 symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15840 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15841 ext_start = symtab_hdr->sh_info;
15843 /* Scan symbols. */
/* Only global symbols (index >= sh_info) are candidates here.  */
15844 for (i = ext_start; i < sym_count; i++)
15846 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15848 /* Assume it is a special symbol. If not, cmse_scan will
15849 warn about it and user can do something about it. */
15850 if (CONST_STRNEQ (cmse_hash->root.root.root.string,
15853 cmse_sec = cmse_hash->root.root.u.def.section;
15854 if (!cmse_sec->gc_mark
15855 && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
15857 /* The debug sections related to these secure entry
15858 functions are marked on enabling below flag. */
15859 debug_sec_need_to_be_marked = TRUE;
15863 if (debug_sec_need_to_be_marked)
15865 /* Looping over all the sections of the object file containing
15866 Armv8-M secure entry functions and marking all the debug
15868 for (isec = sub->sections; isec != NULL; isec = isec->next)
15870 /* Mark only those debug sections that are not yet marked. */
15871 if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
15872 isec->gc_mark = 1 ;
15874 debug_sec_need_to_be_marked = FALSE;
/* All v8-M secure entry symbols were handled in this first pass.  */
15878 first_bfd_browse = FALSE;
15884 /* Treat mapping symbols as special target symbols. */
/* Return TRUE if SYM is one of the ARM special/mapping symbols
   ($a, $t, $d, ...), so generic code treats it specially.  ABFD is
   unused.  (Return-type line not visible in this sampled listing.)  */
15887 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15889 return bfd_is_arm_special_symbol_name (sym->name,
15890 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15893 /* If the ELF symbol SYM might be a function in SEC, return the
15894 function size and set *CODE_OFF to the function's entry point,
15895 otherwise return zero. */
/* NOTE(review): sampled listing -- some lines (early returns, remaining
   switch cases, final return) are not visible in this excerpt.  */
15897 static bfd_size_type
15898 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
15901 bfd_size_type size;
/* Reject symbol kinds that cannot denote functions, and symbols that
   belong to a different section than SEC.  */
15903 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
15904 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
15905 || sym->section != sec)
/* For real (non-synthetic) ELF symbols, filter on the ELF symbol type;
   the visible case includes STT_ARM_TFUNC (Thumb function).  */
15908 if (!(sym->flags & BSF_SYNTHETIC))
15909 switch (ELF_ST_TYPE (((elf_symbol_type *) sym)->internal_elf_sym.st_info))
15912 case STT_ARM_TFUNC:
/* ARM mapping symbols ($a, $t, $d, ...) are local markers, never
   functions.  */
15919 if ((sym->flags & BSF_LOCAL)
15920 && bfd_is_arm_special_symbol_name (sym->name,
15921 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
15924 *code_off = sym->value;
/* Synthetic symbols have no internal ELF symbol, hence no st_size.  */
15926 if (!(sym->flags & BSF_SYNTHETIC))
15927 size = ((elf_symbol_type *) sym)->internal_elf_sym.st_size;
/* Find the source file/function/line of the call site that inlined the
   current position in ABFD, by delegating to the generic DWARF2
   machinery.  (Return-type line, local declaration of FOUND and the
   return statement are not visible in this sampled listing.)  */
15934 elf32_arm_find_inliner_info (bfd * abfd,
15935 const char ** filename_ptr,
15936 const char ** functionname_ptr,
15937 unsigned int * line_ptr)
15940 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
15941 functionname_ptr, line_ptr,
15942 & elf_tdata (abfd)->dwarf2_find_line_info);
15946 /* Adjust a symbol defined by a dynamic object and referenced by a
15947 regular object. The current definition is in some section of the
15948 dynamic object, but we're not including those sections. We have to
15949 change the definition to something the rest of the link can
/* NOTE(review): sampled listing -- the end of the comment above, the
   return-type line, braces and some statements between the numbered lines
   below are not visible in this excerpt.  */
15953 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
15954 struct elf_link_hash_entry * h)
15957 asection *s, *srel;
15958 struct elf32_arm_link_hash_entry * eh;
15959 struct elf32_arm_link_hash_table *globals;
15961 globals = elf32_arm_hash_table (info);
15962 if (globals == NULL)
15965 dynobj = elf_hash_table (info)->dynobj;
15967 /* Make sure we know what is going on here. */
15968 BFD_ASSERT (dynobj != NULL
15970 || h->type == STT_GNU_IFUNC
15974 && !h->def_regular)));
15976 eh = (struct elf32_arm_link_hash_entry *) h;
15978 /* If this is a function, put it in the procedure linkage table. We
15979 will fill in the contents of the procedure linkage table later,
15980 when we know the address of the .got section. */
15981 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
15983 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
15984 symbol binds locally. */
15985 if (h->plt.refcount <= 0
15986 || (h->type != STT_GNU_IFUNC
15987 && (SYMBOL_CALLS_LOCAL (info, h)
15988 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
15989 && h->root.type == bfd_link_hash_undefweak))))
15991 /* This case can occur if we saw a PLT32 reloc in an input
15992 file, but the symbol was never referred to by a dynamic
15993 object, or if all references were garbage collected. In
15994 such a case, we don't actually need to build a procedure
15995 linkage table, and we can just do a PC24 reloc instead. */
15996 h->plt.offset = (bfd_vma) -1;
15997 eh->plt.thumb_refcount = 0;
15998 eh->plt.maybe_thumb_refcount = 0;
15999 eh->plt.noncall_refcount = 0;
16007 /* It's possible that we incorrectly decided a .plt reloc was
16008 needed for an R_ARM_PC24 or similar reloc to a non-function sym
16009 in check_relocs. We can't decide accurately between function
16010 and non-function syms in check-relocs; Objects loaded later in
16011 the link may change h->type. So fix it now. */
16012 h->plt.offset = (bfd_vma) -1;
16013 eh->plt.thumb_refcount = 0;
16014 eh->plt.maybe_thumb_refcount = 0;
16015 eh->plt.noncall_refcount = 0;
16018 /* If this is a weak symbol, and there is a real definition, the
16019 processor independent code will have arranged for us to see the
16020 real definition first, and we can just use the same value. */
16021 if (h->is_weakalias)
16023 struct elf_link_hash_entry *def = weakdef (h);
16024 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16025 h->root.u.def.section = def->root.u.def.section;
16026 h->root.u.def.value = def->root.u.def.value;
16030 /* If there are no non-GOT references, we do not need a copy
16032 if (!h->non_got_ref)
16035 /* This is a reference to a symbol defined by a dynamic object which
16036 is not a function. */
16038 /* If we are creating a shared library, we must presume that the
16039 only references to the symbol are via the global offset table.
16040 For such cases we need not do anything here; the relocations will
16041 be handled correctly by relocate_section. Relocatable executables
16042 can reference data in shared objects directly, so we don't need to
16043 do anything here. */
16044 if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
16047 /* We must allocate the symbol in our .dynbss section, which will
16048 become part of the .bss section of the executable. There will be
16049 an entry for this symbol in the .dynsym section. The dynamic
16050 object will contain position independent code, so all references
16051 from the dynamic object to this symbol will go through the global
16052 offset table. The dynamic linker will use the .dynsym entry to
16053 determine the address it must put in the global offset table, so
16054 both the dynamic object and the regular object will refer to the
16055 same memory location for the variable. */
16056 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16057 linker to copy the initial value out of the dynamic object and into
16058 the runtime process image. We need to remember the offset into the
16059 .rel(a).bss section we are going to use. */
/* Read-only definitions get copied into the relro area (.data.rel.ro)
   instead of .dynbss.  */
16060 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16062 s = globals->root.sdynrelro;
16063 srel = globals->root.sreldynrelro;
16067 s = globals->root.sdynbss;
16068 srel = globals->root.srelbss;
16070 if (info->nocopyreloc == 0
16071 && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16074 elf32_arm_allocate_dynrelocs (info, srel, 1);
/* Let the generic code reserve the copy-reloc space in S.  */
16078 return _bfd_elf_adjust_dynamic_copy (info, h, s);
16081 /* Allocate space in .plt, .got and associated reloc sections for
/* Traversal callback run over every hash-table symbol H, with INF being
   the struct bfd_link_info.  NOTE(review): sampled listing -- the end of
   the comment above, braces and some statements between the numbered
   lines below are not visible in this excerpt.  */
16085 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16087 struct bfd_link_info *info;
16088 struct elf32_arm_link_hash_table *htab;
16089 struct elf32_arm_link_hash_entry *eh;
16090 struct elf_dyn_relocs *p;
/* Indirect symbols are aliases; the real entry is processed instead.  */
16092 if (h->root.type == bfd_link_hash_indirect)
16095 eh = (struct elf32_arm_link_hash_entry *) h;
16097 info = (struct bfd_link_info *) inf;
16098 htab = elf32_arm_hash_table (info);
/* PLT allocation: needed when the symbol has PLT references and either
   dynamic sections exist or it is an ifunc (ifuncs always get a PLT).  */
16102 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16103 && h->plt.refcount > 0)
16105 /* Make sure this symbol is output as a dynamic symbol.
16106 Undefined weak syms won't yet be marked as dynamic. */
16107 if (h->dynindx == -1 && !h->forced_local
16108 && h->root.type == bfd_link_hash_undefweak)
16110 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16114 /* If the call in the PLT entry binds locally, the associated
16115 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16116 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16117 than the .plt section. */
16118 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16121 if (eh->plt.noncall_refcount == 0
16122 && SYMBOL_REFERENCES_LOCAL (info, h))
16123 /* All non-call references can be resolved directly.
16124 This means that they can (and in some cases, must)
16125 resolve directly to the run-time target, rather than
16126 to the PLT. That in turn means that any .got entry
16127 would be equal to the .igot.plt entry, so there's
16128 no point having both. */
16129 h->got.refcount = 0;
16132 if (bfd_link_pic (info)
16134 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16136 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16138 /* If this symbol is not defined in a regular file, and we are
16139 not generating a shared library, then set the symbol to this
16140 location in the .plt. This is required to make function
16141 pointers compare as equal between the normal executable and
16142 the shared library. */
16143 if (! bfd_link_pic (info)
16144 && !h->def_regular)
16146 h->root.u.def.section = htab->root.splt;
16147 h->root.u.def.value = h->plt.offset;
16149 /* Make sure the function is not marked as Thumb, in case
16150 it is the target of an ABS32 relocation, which will
16151 point to the PLT entry. */
16152 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16155 /* VxWorks executables have a second set of relocations for
16156 each PLT entry. They go in a separate relocation section,
16157 which is processed by the kernel loader. */
16158 if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
16160 /* There is a relocation for the initial PLT entry:
16161 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16162 if (h->plt.offset == htab->plt_header_size)
16163 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16165 /* There are two extra relocations for each subsequent
16166 PLT entry: an R_ARM_32 relocation for the GOT entry,
16167 and an R_ARM_32 relocation for the PLT entry. */
16168 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
/* -1 marks "no PLT entry" for later passes.  */
16173 h->plt.offset = (bfd_vma) -1;
16179 h->plt.offset = (bfd_vma) -1;
16183 eh = (struct elf32_arm_link_hash_entry *) h;
16184 eh->tlsdesc_got = (bfd_vma) -1;
/* GOT allocation: reserve slot(s) and any dynamic relocations the GOT
   entry needs, according to the symbol's TLS flavour.  */
16186 if (h->got.refcount > 0)
16190 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16193 /* Make sure this symbol is output as a dynamic symbol.
16194 Undefined weak syms won't yet be marked as dynamic. */
16195 if (htab->root.dynamic_sections_created
16196 && h->dynindx == -1
16197 && !h->forced_local
16198 && h->root.type == bfd_link_hash_undefweak)
16200 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16204 s = htab->root.sgot;
16205 h->got.offset = s->size;
16207 if (tls_type == GOT_UNKNOWN)
16210 if (tls_type == GOT_NORMAL)
16211 /* Non-TLS symbols need one GOT slot. */
16215 if (tls_type & GOT_TLS_GDESC)
16217 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16219 = (htab->root.sgotplt->size
16220 - elf32_arm_compute_jump_table_size (htab));
16221 htab->root.sgotplt->size += 8;
16222 h->got.offset = (bfd_vma) -2;
16223 /* plt.got_offset needs to know there's a TLS_DESC
16224 reloc in the middle of .got.plt. */
16225 htab->num_tls_desc++;
16228 if (tls_type & GOT_TLS_GD)
16230 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16231 consecutive GOT slots. If the symbol is both GD
16232 and GDESC, got.offset may have been
16234 h->got.offset = s->size;
16238 if (tls_type & GOT_TLS_IE)
16239 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
/* Decide whether the GOT entry additionally needs dynamic relocs.  */
16244 dyn = htab->root.dynamic_sections_created;
16247 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
16248 && (!bfd_link_pic (info)
16249 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16252 if (tls_type != GOT_NORMAL
16253 && (bfd_link_dll (info) || indx != 0)
16254 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16255 || h->root.type != bfd_link_hash_undefweak))
16257 if (tls_type & GOT_TLS_IE)
16258 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16260 if (tls_type & GOT_TLS_GD)
16261 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16263 if (tls_type & GOT_TLS_GDESC)
16265 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16266 /* GDESC needs a trampoline to jump to. */
16267 htab->tls_trampoline = -1;
16270 /* Only GD needs it. GDESC just emits one relocation per
16272 if ((tls_type & GOT_TLS_GD) && indx != 0)
16273 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16275 else if (((indx != -1) || htab->fdpic_p)
16276 && !SYMBOL_REFERENCES_LOCAL (info, h))
16278 if (htab->root.dynamic_sections_created)
16279 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16280 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16282 else if (h->type == STT_GNU_IFUNC
16283 && eh->plt.noncall_refcount == 0)
16284 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16285 they all resolve dynamically instead. Reserve room for the
16286 GOT entry's R_ARM_IRELATIVE relocation. */
16287 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16288 else if (bfd_link_pic (info)
16289 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16290 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16291 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16292 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16293 /* Reserve room for rofixup for FDPIC executable. */
16294 /* TLS relocs do not need space since they are completely
16296 htab->srofixup->size += 4;
16299 h->got.offset = (bfd_vma) -1;
16301 /* FDPIC support. */
16302 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16304 /* Symbol mustn't be exported. */
16305 if (h->dynindx != -1)
16308 /* We only allocate one function descriptor with its associated
16310 if (eh->fdpic_cnts.funcdesc_offset == -1)
16312 asection *s = htab->root.sgot;
16314 eh->fdpic_cnts.funcdesc_offset = s->size;
16316 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16317 if (bfd_link_pic(info))
16318 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16320 htab->srofixup->size += 8;
16324 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16326 asection *s = htab->root.sgot;
16328 if (htab->root.dynamic_sections_created && h->dynindx == -1
16329 && !h->forced_local)
16330 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16333 if (h->dynindx == -1)
16335 /* We only allocate one function descriptor with its
16336 associated relocation. */
16337 if (eh->fdpic_cnts.funcdesc_offset == -1)
16340 eh->fdpic_cnts.funcdesc_offset = s->size;
16342 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16344 if (bfd_link_pic(info))
16345 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16347 htab->srofixup->size += 8;
16351 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16352 R_ARM_RELATIVE/rofixup relocation on it. */
16353 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16355 if (h->dynindx == -1 && !bfd_link_pic(info))
16356 htab->srofixup->size += 4;
16358 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16361 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16363 if (htab->root.dynamic_sections_created && h->dynindx == -1
16364 && !h->forced_local)
16365 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16368 if (h->dynindx == -1)
16370 /* We only allocate one function descriptor with its
16371 associated relocation. */
16372 if (eh->fdpic_cnts.funcdesc_offset == -1)
16374 asection *s = htab->root.sgot;
16376 eh->fdpic_cnts.funcdesc_offset = s->size;
16378 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16380 if (bfd_link_pic(info))
16381 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16383 htab->srofixup->size += 8;
16386 if (h->dynindx == -1 && !bfd_link_pic(info))
16388 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16389 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16393 /* Will need one dynamic reloc per reference. will be either
16394 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16395 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16396 eh->fdpic_cnts.funcdesc_cnt);
16400 /* Allocate stubs for exported Thumb functions on v4t. */
16401 if (!htab->use_blx && h->dynindx != -1
16403 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16404 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16406 struct elf_link_hash_entry * th;
16407 struct bfd_link_hash_entry * bh;
16408 struct elf_link_hash_entry * myh;
16412 /* Create a new symbol to register the real location of the function. */
16413 s = h->root.u.def.section;
16414 sprintf (name, "__real_%s", h->root.root.string);
16415 _bfd_generic_link_add_one_symbol (info, s->owner,
16416 name, BSF_GLOBAL, s,
16417 h->root.u.def.value,
16418 NULL, TRUE, FALSE, &bh);
16420 myh = (struct elf_link_hash_entry *) bh;
16421 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16422 myh->forced_local = 1;
16423 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16424 eh->export_glue = myh;
16425 th = record_arm_to_thumb_glue (info, h);
16426 /* Point the symbol at the stub. */
16427 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16428 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16429 h->root.u.def.section = th->root.u.def.section;
16430 h->root.u.def.value = th->root.u.def.value & ~1;
16433 if (h->dyn_relocs == NULL)
16436 /* In the shared -Bsymbolic case, discard space allocated for
16437 dynamic pc-relative relocs against symbols which turn out to be
16438 defined in regular objects. For the normal shared case, discard
16439 space for pc-relative relocs that have become local due to symbol
16440 visibility changes. */
16442 if (bfd_link_pic (info)
16443 || htab->root.is_relocatable_executable
16446 /* Relocs that use pc_count are PC-relative forms, which will appear
16447 on something like ".long foo - ." or "movw REG, foo - .". We want
16448 calls to protected symbols to resolve directly to the function
16449 rather than going via the plt. If people want function pointer
16450 comparisons to work as expected then they should avoid writing
16451 assembly like ".long foo - .". */
16452 if (SYMBOL_CALLS_LOCAL (info, h))
16454 struct elf_dyn_relocs **pp;
16456 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16458 p->count -= p->pc_count;
/* VxWorks .tls_vars relocations are handled by the loader; drop them.  */
16467 if (htab->root.target_os == is_vxworks)
16469 struct elf_dyn_relocs **pp;
16471 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16473 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16480 /* Also discard relocs on undefined weak syms with non-default
16482 if (h->dyn_relocs != NULL
16483 && h->root.type == bfd_link_hash_undefweak)
16485 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16486 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16487 h->dyn_relocs = NULL;
16489 /* Make sure undefined weak symbols are output as a dynamic
16491 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16492 && !h->forced_local)
16494 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16499 else if (htab->root.is_relocatable_executable && h->dynindx == -1
16500 && h->root.type == bfd_link_hash_new)
16502 /* Output absolute symbols so that we can create relocations
16503 against them. For normal symbols we output a relocation
16504 against the section that contains them. */
16505 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16512 /* For the non-shared case, discard space for relocs against
16513 symbols which turn out to need copy relocs or are not
16516 if (!h->non_got_ref
16517 && ((h->def_dynamic
16518 && !h->def_regular)
16519 || (htab->root.dynamic_sections_created
16520 && (h->root.type == bfd_link_hash_undefweak
16521 || h->root.type == bfd_link_hash_undefined))))
16523 /* Make sure this symbol is output as a dynamic symbol.
16524 Undefined weak syms won't yet be marked as dynamic. */
16525 if (h->dynindx == -1 && !h->forced_local
16526 && h->root.type == bfd_link_hash_undefweak)
16528 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16532 /* If that succeeded, we know we'll be keeping all the
16534 if (h->dynindx != -1)
16538 h->dyn_relocs = NULL;
16543 /* Finally, allocate space. */
16544 for (p = h->dyn_relocs; p != NULL; p = p->next)
16546 asection *sreloc = elf_section_data (p->sec)->sreloc;
/* Locally-bound ifunc references get R_ARM_IRELATIVE; exported symbols
   get real dynamic relocs; FDPIC static links use rofixups instead.  */
16548 if (h->type == STT_GNU_IFUNC
16549 && eh->plt.noncall_refcount == 0
16550 && SYMBOL_REFERENCES_LOCAL (info, h))
16551 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16552 else if (h->dynindx != -1
16553 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
16554 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16555 else if (htab->fdpic_p && !bfd_link_pic(info))
16556 htab->srofixup->size += 4 * p->count;
16558 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
/* Record the linker's byteswap-code (BE8) setting in the ARM link hash
   table so later passes can consult it.  (The second parameter line --
   presumably "int byteswap_code" -- and the return paths are not visible
   in this sampled listing.)  */
16565 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16568 struct elf32_arm_link_hash_table *globals;
16570 globals = elf32_arm_hash_table (info);
16571 if (globals == NULL)
16574 globals->byteswap_code = byteswap_code;
16577 /* Set the sizes of the dynamic sections. */
16580 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16581 struct bfd_link_info * info)
16585 bfd_boolean relocs;
16587 struct elf32_arm_link_hash_table *htab;
16589 htab = elf32_arm_hash_table (info);
16593 dynobj = elf_hash_table (info)->dynobj;
16594 BFD_ASSERT (dynobj != NULL);
16595 check_use_blx (htab);
16597 if (elf_hash_table (info)->dynamic_sections_created)
16599 /* Set the contents of the .interp section to the interpreter. */
16600 if (bfd_link_executable (info) && !info->nointerp)
16602 s = bfd_get_linker_section (dynobj, ".interp");
16603 BFD_ASSERT (s != NULL);
16604 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16605 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16609 /* Set up .got offsets for local syms, and space for local dynamic
16611 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16613 bfd_signed_vma *local_got;
16614 bfd_signed_vma *end_local_got;
16615 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16616 char *local_tls_type;
16617 bfd_vma *local_tlsdesc_gotent;
16618 bfd_size_type locsymcount;
16619 Elf_Internal_Shdr *symtab_hdr;
16621 unsigned int symndx;
16622 struct fdpic_local *local_fdpic_cnts;
16624 if (! is_arm_elf (ibfd))
16627 for (s = ibfd->sections; s != NULL; s = s->next)
16629 struct elf_dyn_relocs *p;
16631 for (p = (struct elf_dyn_relocs *)
16632 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16634 if (!bfd_is_abs_section (p->sec)
16635 && bfd_is_abs_section (p->sec->output_section))
16637 /* Input section has been discarded, either because
16638 it is a copy of a linkonce section or due to
16639 linker script /DISCARD/, so we'll be discarding
16642 else if (htab->root.target_os == is_vxworks
16643 && strcmp (p->sec->output_section->name,
16646 /* Relocations in vxworks .tls_vars sections are
16647 handled specially by the loader. */
16649 else if (p->count != 0)
16651 srel = elf_section_data (p->sec)->sreloc;
16652 if (htab->fdpic_p && !bfd_link_pic(info))
16653 htab->srofixup->size += 4 * p->count;
16655 elf32_arm_allocate_dynrelocs (info, srel, p->count);
16656 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16657 info->flags |= DF_TEXTREL;
16662 local_got = elf_local_got_refcounts (ibfd);
16666 symtab_hdr = & elf_symtab_hdr (ibfd);
16667 locsymcount = symtab_hdr->sh_info;
16668 end_local_got = local_got + locsymcount;
16669 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16670 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16671 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16672 local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16674 s = htab->root.sgot;
16675 srel = htab->root.srelgot;
16676 for (; local_got < end_local_got;
16677 ++local_got, ++local_iplt_ptr, ++local_tls_type,
16678 ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16680 *local_tlsdesc_gotent = (bfd_vma) -1;
16681 local_iplt = *local_iplt_ptr;
16683 /* FDPIC support. */
16684 if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16686 if (local_fdpic_cnts->funcdesc_offset == -1)
16688 local_fdpic_cnts->funcdesc_offset = s->size;
16691 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16692 if (bfd_link_pic(info))
16693 elf32_arm_allocate_dynrelocs (info, srel, 1);
16695 htab->srofixup->size += 8;
16699 if (local_fdpic_cnts->funcdesc_cnt > 0)
16701 if (local_fdpic_cnts->funcdesc_offset == -1)
16703 local_fdpic_cnts->funcdesc_offset = s->size;
16706 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16707 if (bfd_link_pic(info))
16708 elf32_arm_allocate_dynrelocs (info, srel, 1);
16710 htab->srofixup->size += 8;
16713 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16714 if (bfd_link_pic(info))
16715 elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16717 htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16720 if (local_iplt != NULL)
16722 struct elf_dyn_relocs *p;
16724 if (local_iplt->root.refcount > 0)
16726 elf32_arm_allocate_plt_entry (info, TRUE,
16729 if (local_iplt->arm.noncall_refcount == 0)
16730 /* All references to the PLT are calls, so all
16731 non-call references can resolve directly to the
16732 run-time target. This means that the .got entry
16733 would be the same as the .igot.plt entry, so there's
16734 no point creating both. */
16739 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16740 local_iplt->root.offset = (bfd_vma) -1;
16743 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16747 psrel = elf_section_data (p->sec)->sreloc;
16748 if (local_iplt->arm.noncall_refcount == 0)
16749 elf32_arm_allocate_irelocs (info, psrel, p->count);
16751 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16754 if (*local_got > 0)
16756 Elf_Internal_Sym *isym;
16758 *local_got = s->size;
16759 if (*local_tls_type & GOT_TLS_GD)
16760 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16762 if (*local_tls_type & GOT_TLS_GDESC)
16764 *local_tlsdesc_gotent = htab->root.sgotplt->size
16765 - elf32_arm_compute_jump_table_size (htab);
16766 htab->root.sgotplt->size += 8;
16767 *local_got = (bfd_vma) -2;
16768 /* plt.got_offset needs to know there's a TLS_DESC
16769 reloc in the middle of .got.plt. */
16770 htab->num_tls_desc++;
16772 if (*local_tls_type & GOT_TLS_IE)
16775 if (*local_tls_type & GOT_NORMAL)
16777 /* If the symbol is both GD and GDESC, *local_got
16778 may have been overwritten. */
16779 *local_got = s->size;
16783 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
16788 /* If all references to an STT_GNU_IFUNC PLT are calls,
16789 then all non-call references, including this GOT entry,
16790 resolve directly to the run-time target. */
16791 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
16792 && (local_iplt == NULL
16793 || local_iplt->arm.noncall_refcount == 0))
16794 elf32_arm_allocate_irelocs (info, srel, 1);
16795 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
16797 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
16798 elf32_arm_allocate_dynrelocs (info, srel, 1);
16799 else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
16800 htab->srofixup->size += 4;
16802 if ((bfd_link_pic (info) || htab->fdpic_p)
16803 && *local_tls_type & GOT_TLS_GDESC)
16805 elf32_arm_allocate_dynrelocs (info,
16806 htab->root.srelplt, 1);
16807 htab->tls_trampoline = -1;
16812 *local_got = (bfd_vma) -1;
16816 if (htab->tls_ldm_got.refcount > 0)
16818 /* Allocate two GOT entries and one dynamic relocation (if necessary)
16819 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
16820 htab->tls_ldm_got.offset = htab->root.sgot->size;
16821 htab->root.sgot->size += 8;
16822 if (bfd_link_pic (info))
16823 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16826 htab->tls_ldm_got.offset = -1;
16828 /* At the very end of the .rofixup section is a pointer to the GOT,
16829 reserve space for it. */
16830 if (htab->fdpic_p && htab->srofixup != NULL)
16831 htab->srofixup->size += 4;
16833 /* Allocate global sym .plt and .got entries, and space for global
16834 sym dynamic relocs. */
16835 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
16837 /* Here we rummage through the found bfds to collect glue information. */
16838 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16840 if (! is_arm_elf (ibfd))
16843 /* Initialise mapping tables for code/data. */
16844 bfd_elf32_arm_init_maps (ibfd);
16846 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
16847 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
16848 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
16849 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
16852 /* Allocate space for the glue sections now that we've sized them. */
16853 bfd_elf32_arm_allocate_interworking_sections (info);
16855 /* For every jump slot reserved in the sgotplt, reloc_count is
16856 incremented. However, when we reserve space for TLS descriptors,
16857 it's not incremented, so in order to compute the space reserved
16858 for them, it suffices to multiply the reloc count by the jump
16860 if (htab->root.srelplt)
16861 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
16863 if (htab->tls_trampoline)
16865 if (htab->root.splt->size == 0)
16866 htab->root.splt->size += htab->plt_header_size;
16868 htab->tls_trampoline = htab->root.splt->size;
16869 htab->root.splt->size += htab->plt_entry_size;
16871 /* If we're not using lazy TLS relocations, don't generate the
16872 PLT and GOT entries they require. */
16873 if ((info->flags & DF_BIND_NOW))
16874 htab->root.tlsdesc_plt = 0;
16877 htab->root.tlsdesc_got = htab->root.sgot->size;
16878 htab->root.sgot->size += 4;
16880 htab->root.tlsdesc_plt = htab->root.splt->size;
16881 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
16885 /* The check_relocs and adjust_dynamic_symbol entry points have
16886 determined the sizes of the various dynamic sections. Allocate
16887 memory for them. */
16889 for (s = dynobj->sections; s != NULL; s = s->next)
16893 if ((s->flags & SEC_LINKER_CREATED) == 0)
16896 /* It's OK to base decisions on the section name, because none
16897 of the dynobj section names depend upon the input files. */
16898 name = bfd_section_name (s);
16900 if (s == htab->root.splt)
16902 /* Remember whether there is a PLT. */
16905 else if (CONST_STRNEQ (name, ".rel"))
16909 /* Remember whether there are any reloc sections other
16910 than .rel(a).plt and .rela.plt.unloaded. */
16911 if (s != htab->root.srelplt && s != htab->srelplt2)
16914 /* We use the reloc_count field as a counter if we need
16915 to copy relocs into the output file. */
16916 s->reloc_count = 0;
16919 else if (s != htab->root.sgot
16920 && s != htab->root.sgotplt
16921 && s != htab->root.iplt
16922 && s != htab->root.igotplt
16923 && s != htab->root.sdynbss
16924 && s != htab->root.sdynrelro
16925 && s != htab->srofixup)
16927 /* It's not one of our sections, so don't allocate space. */
16933 /* If we don't need this section, strip it from the
16934 output file. This is mostly to handle .rel(a).bss and
16935 .rel(a).plt. We must create both sections in
16936 create_dynamic_sections, because they must be created
16937 before the linker maps input sections to output
16938 sections. The linker does that before
16939 adjust_dynamic_symbol is called, and it is that
16940 function which decides whether anything needs to go
16941 into these sections. */
16942 s->flags |= SEC_EXCLUDE;
16946 if ((s->flags & SEC_HAS_CONTENTS) == 0)
16949 /* Allocate memory for the section contents. */
16950 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
16951 if (s->contents == NULL)
16955 return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd, info,
16959 /* Size sections even though they're not dynamic. We use it to setup
16960 _TLS_MODULE_BASE_, if needed. */
/* NOTE(review): this listing is elided (the embedded line numbers jump);
   braces, the return-type line and some statements are missing.  Restore
   from a pristine copy before modifying logic.  */
16963 elf32_arm_always_size_sections (bfd *output_bfd,
16964 struct bfd_link_info *info)
16967 struct elf32_arm_link_hash_table *htab;
16969 htab = elf32_arm_hash_table (info);
/* Nothing to do for relocatable links: _TLS_MODULE_BASE_ is only needed
   in final links.  */
16971 if (bfd_link_relocatable (info))
16974 tls_sec = elf_hash_table (info)->tls_sec;
16978 struct elf_link_hash_entry *tlsbase;
/* Look up (and create, per the TRUE/TRUE flags) the magic
   _TLS_MODULE_BASE_ symbol used by TLS descriptor relaxation.  */
16980 tlsbase = elf_link_hash_lookup
16981 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
16985 struct bfd_link_hash_entry *bh = NULL;
16986 const struct elf_backend_data *bed
16987 = get_elf_backend_data (output_bfd);
/* Define it as a local symbol at the start of the TLS section.  */
16989 if (!(_bfd_generic_link_add_one_symbol
16990 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
16991 tls_sec, 0, NULL, FALSE,
16992 bed->collect, &bh)))
/* NOTE(review): the two statements below look reordered relative to
   upstream binutils, where `tlsbase = (struct elf_link_hash_entry *) bh;`
   comes first and the STT_TLS assignment is applied to the new pointer.
   As listed, the type is written through the pre-lookup pointer —
   confirm against a pristine tree.  */
16995 tlsbase->type = STT_TLS;
16996 tlsbase = (struct elf_link_hash_entry *)bh;
16997 tlsbase->def_regular = 1;
16998 tlsbase->other = STV_HIDDEN;
/* Hide the symbol so it is not exported from shared objects.  */
16999 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
/* FDPIC targets also need a stack-size segment; fail if it cannot be
   established.  */
17003 if (htab->fdpic_p && !bfd_link_relocatable (info)
17004 && !bfd_elf_stack_segment_size (output_bfd, info,
17005 "__stacksize", DEFAULT_STACK_SIZE))
17011 /* Finish up dynamic symbol handling. We set the contents of various
17012 dynamic sections here. */
/* NOTE(review): listing is elided (gaps in embedded line numbers);
   braces, else-arms and some conditions are missing.  */
17015 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17016 struct bfd_link_info * info,
17017 struct elf_link_hash_entry * h,
17018 Elf_Internal_Sym * sym)
17020 struct elf32_arm_link_hash_table *htab;
17021 struct elf32_arm_link_hash_entry *eh;
17023 htab = elf32_arm_hash_table (info);
17027 eh = (struct elf32_arm_link_hash_entry *) h;
/* If the symbol has a PLT entry, emit the entry's contents now.  */
17029 if (h->plt.offset != (bfd_vma) -1)
17033 BFD_ASSERT (h->dynindx != -1);
17034 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17039 if (!h->def_regular)
17041 /* Mark the symbol as undefined, rather than as defined in
17042 the .plt section. */
17043 sym->st_shndx = SHN_UNDEF;
17044 /* If the symbol is weak we need to clear the value.
17045 Otherwise, the PLT entry would provide a definition for
17046 the symbol even if the symbol wasn't defined anywhere,
17047 and so the symbol would never be NULL. Leave the value if
17048 there were any relocations where pointer equality matters
17049 (this is a clue for the dynamic linker, to make function
17050 pointer comparisons work between an application and shared
17052 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17055 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17057 /* At least one non-call relocation references this .iplt entry,
17058 so the .iplt entry is the function's canonical address. */
17059 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17060 ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17061 sym->st_shndx = (_bfd_elf_section_from_bfd_section
17062 (output_bfd, htab->root.iplt->output_section));
17063 sym->st_value = (h->plt.offset
17064 + htab->root.iplt->output_section->vma
17065 + htab->root.iplt->output_offset);
17072 Elf_Internal_Rela rel;
17074 /* This symbol needs a copy reloc. Set it up. */
17075 BFD_ASSERT (h->dynindx != -1
17076 && (h->root.type == bfd_link_hash_defined
17077 || h->root.type == bfd_link_hash_defweak));
/* The copy reloc targets the symbol's definition in .dynbss or
   .data.rel.ro; pick the matching relocation section below.  */
17080 rel.r_offset = (h->root.u.def.value
17081 + h->root.u.def.section->output_section->vma
17082 + h->root.u.def.section->output_offset);
17083 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17084 if (h->root.u.def.section == htab->root.sdynrelro)
17085 s = htab->root.sreldynrelro;
17087 s = htab->root.srelbss;
17088 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17091 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17092 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17093 it is relative to the ".got" section. */
17094 if (h == htab->root.hdynamic
17096 && htab->root.target_os != is_vxworks
17097 && h == htab->root.hgot))
17098 sym->st_shndx = SHN_ABS;
/* Write COUNT ARM instructions from TEMPLATE into CONTENTS, one word
   each.  When the v4bx fix is in "replace" mode (fix_v4bx == 1), any
   `bx rX` instruction in the template is rewritten as `mov pc, rX`.  */
17104 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17106 const unsigned long *template, unsigned count)
17110 for (ix = 0; ix != count; ix++)
17112 unsigned long insn = template[ix];
17114 /* Emit mov pc,rx if bx is not permitted. */
17115 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17116 insn = (insn & 0xf000000f) | 0x01a0f000;
17117 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17121 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17122 other variants, NaCl needs this entry in a static executable's
17123 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17124 zero. For .iplt really only the last bundle is useful, and .iplt
17125 could have a shorter first entry, with each individual PLT entry's
17126 relative branch calculated differently so it targets the last
17127 bundle instead of the instruction before it (labelled .Lplt_tail
17128 above). But it's simpler to keep the size and layout of PLT0
17129 consistent with the dynamic case, at the cost of some dead code at
17130 the start of .iplt and the one dead store to the stack at the start
17133 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17134 asection *plt, bfd_vma got_displacement)
/* The first two words are movw/movt; splice the low and high halves of
   GOT_DISPLACEMENT into their immediate fields.  */
17138 put_arm_insn (htab, output_bfd,
17139 elf32_arm_nacl_plt0_entry[0]
17140 | arm_movw_immediate (got_displacement),
17141 plt->contents + 0);
17142 put_arm_insn (htab, output_bfd,
17143 elf32_arm_nacl_plt0_entry[1]
17144 | arm_movt_immediate (got_displacement),
17145 plt->contents + 4);
/* The remaining template words are emitted verbatim.  */
17147 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17148 put_arm_insn (htab, output_bfd,
17149 elf32_arm_nacl_plt0_entry[i],
17150 plt->contents + (i * 4));
17153 /* Finish up the dynamic sections. */
/* NOTE(review): heavily elided listing — most braces, several `case`
   labels of the DT_* switch, `else` lines and local declarations are
   missing (embedded line numbers jump).  Comments below are limited to
   what the visible lines establish.  */
17156 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17161 struct elf32_arm_link_hash_table *htab;
17163 htab = elf32_arm_hash_table (info);
17167 dynobj = elf_hash_table (info)->dynobj;
17169 sgot = htab->root.sgotplt;
17170 /* A broken linker script might have discarded the dynamic sections.
17171 Catch this here so that we do not seg-fault later on. */
17172 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17174 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17176 if (elf_hash_table (info)->dynamic_sections_created)
17179 Elf32_External_Dyn *dyncon, *dynconend;
17181 splt = htab->root.splt;
17182 BFD_ASSERT (splt != NULL && sdyn != NULL);
17183 BFD_ASSERT (sgot != NULL);
/* Walk every entry of .dynamic and patch in final addresses/sizes.  */
17185 dyncon = (Elf32_External_Dyn *) sdyn->contents;
17186 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17188 for (; dyncon < dynconend; dyncon++)
17190 Elf_Internal_Dyn dyn;
17194 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
/* Give VxWorks a first chance at target-specific dynamic tags.  */
17199 if (htab->root.target_os == is_vxworks
17200 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17201 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
/* Presumably the DT_JMPREL case: point at .rel(a).plt.  The `case`
   label itself is elided from this listing — verify upstream.  */
17216 name = RELOC_SECTION (htab, ".plt");
17218 s = bfd_get_linker_section (dynobj, name);
17222 (_("could not find section %s"), name);
17223 bfd_set_error (bfd_error_invalid_operation);
17226 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17227 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
/* Presumably DT_PLTRELSZ: size of the PLT relocation section.  */
17231 s = htab->root.srelplt;
17232 BFD_ASSERT (s != NULL);
17233 dyn.d_un.d_val = s->size;
17234 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17243 case DT_TLSDESC_PLT:
17244 s = htab->root.splt;
17245 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17246 + htab->root.tlsdesc_plt);
17247 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17250 case DT_TLSDESC_GOT:
17251 s = htab->root.sgot;
17252 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17253 + htab->root.tlsdesc_got);
17254 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17257 /* Set the bottom bit of DT_INIT/FINI if the
17258 corresponding function is Thumb. */
17260 name = info->init_function;
17263 name = info->fini_function;
17265 /* If it wasn't set by elf_bfd_final_link
17266 then there is nothing to adjust. */
17267 if (dyn.d_un.d_val != 0)
17269 struct elf_link_hash_entry * eh;
17271 eh = elf_link_hash_lookup (elf_hash_table (info), name,
17272 FALSE, FALSE, TRUE);
17274 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17275 == ST_BRANCH_TO_THUMB)
17277 dyn.d_un.d_val |= 1;
17278 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17285 /* Fill in the first entry in the procedure linkage table. */
17286 if (splt->size > 0 && htab->plt_header_size)
17288 const bfd_vma *plt0_entry;
17289 bfd_vma got_address, plt_address, got_displacement;
17291 /* Calculate the addresses of the GOT and PLT. */
17292 got_address = sgot->output_section->vma + sgot->output_offset;
17293 plt_address = splt->output_section->vma + splt->output_offset;
17295 if (htab->root.target_os == is_vxworks)
17297 /* The VxWorks GOT is relocated by the dynamic linker.
17298 Therefore, we must emit relocations rather than simply
17299 computing the values now. */
17300 Elf_Internal_Rela rel;
17302 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17303 put_arm_insn (htab, output_bfd, plt0_entry[0],
17304 splt->contents + 0);
17305 put_arm_insn (htab, output_bfd, plt0_entry[1],
17306 splt->contents + 4);
17307 put_arm_insn (htab, output_bfd, plt0_entry[2],
17308 splt->contents + 8);
17309 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17311 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17312 rel.r_offset = plt_address + 12;
17313 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17315 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17316 htab->srelplt2->contents);
17318 else if (htab->root.target_os == is_nacl)
17319 arm_nacl_put_plt0 (htab, output_bfd, splt,
17320 got_address + 8 - (plt_address + 16));
17321 else if (using_thumb_only (htab))
17323 got_displacement = got_address - (plt_address + 12);
17325 plt0_entry = elf32_thumb2_plt0_entry;
17326 put_arm_insn (htab, output_bfd, plt0_entry[0],
17327 splt->contents + 0);
17328 put_arm_insn (htab, output_bfd, plt0_entry[1],
17329 splt->contents + 4);
17330 put_arm_insn (htab, output_bfd, plt0_entry[2],
17331 splt->contents + 8);
17333 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
/* Default (ARM-state) PLT0: four instructions plus the GOT
   displacement word.  */
17337 got_displacement = got_address - (plt_address + 16);
17339 plt0_entry = elf32_arm_plt0_entry;
17340 put_arm_insn (htab, output_bfd, plt0_entry[0],
17341 splt->contents + 0);
17342 put_arm_insn (htab, output_bfd, plt0_entry[1],
17343 splt->contents + 4);
17344 put_arm_insn (htab, output_bfd, plt0_entry[2],
17345 splt->contents + 8);
17346 put_arm_insn (htab, output_bfd, plt0_entry[3],
17347 splt->contents + 12);
17349 #ifdef FOUR_WORD_PLT
17350 /* The displacement value goes in the otherwise-unused
17351 last word of the second entry. */
17352 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17354 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17359 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17360 really seem like the right value. */
17361 if (splt->output_section->owner == output_bfd)
17362 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
/* Emit the lazy TLS-descriptor trampoline and its two trailing
   relative-offset words.  */
17364 if (htab->root.tlsdesc_plt)
17366 bfd_vma got_address
17367 = sgot->output_section->vma + sgot->output_offset;
17368 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17369 + htab->root.sgot->output_offset);
17370 bfd_vma plt_address
17371 = splt->output_section->vma + splt->output_offset;
17373 arm_put_trampoline (htab, output_bfd,
17374 splt->contents + htab->root.tlsdesc_plt,
17375 dl_tlsdesc_lazy_trampoline, 6);
17377 bfd_put_32 (output_bfd,
17378 gotplt_address + htab->root.tlsdesc_got
17379 - (plt_address + htab->root.tlsdesc_plt)
17380 - dl_tlsdesc_lazy_trampoline[6],
17381 splt->contents + htab->root.tlsdesc_plt + 24);
17382 bfd_put_32 (output_bfd,
17383 got_address - (plt_address + htab->root.tlsdesc_plt)
17384 - dl_tlsdesc_lazy_trampoline[7],
17385 splt->contents + htab->root.tlsdesc_plt + 24 + 4);
17388 if (htab->tls_trampoline)
17390 arm_put_trampoline (htab, output_bfd,
17391 splt->contents + htab->tls_trampoline,
17392 tls_trampoline, 3);
17393 #ifdef FOUR_WORD_PLT
17394 bfd_put_32 (output_bfd, 0x00000000,
17395 splt->contents + htab->tls_trampoline + 12);
17399 if (htab->root.target_os == is_vxworks
17400 && !bfd_link_pic (info)
17401 && htab->root.splt->size > 0)
17403 /* Correct the .rel(a).plt.unloaded relocations. They will have
17404 incorrect symbol indexes. */
17408 num_plts = ((htab->root.splt->size - htab->plt_header_size)
17409 / htab->plt_entry_size);
17410 p = htab->srelplt2->contents + RELOC_SIZE (htab);
/* Each PLT entry owns a pair of unloaded relocs: one against _G_O_T_
   and one against _PROCEDURE_LINKAGE_TABLE_; rewrite both indexes.  */
17412 for (; num_plts; num_plts--)
17414 Elf_Internal_Rela rel;
17416 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17417 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17418 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17419 p += RELOC_SIZE (htab);
17421 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17422 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17423 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17424 p += RELOC_SIZE (htab);
17429 if (htab->root.target_os == is_nacl
17430 && htab->root.iplt != NULL
17431 && htab->root.iplt->size > 0)
17432 /* NaCl uses a special first entry in .iplt too. */
17433 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17435 /* Fill in the first three entries in the global offset table. */
17438 if (sgot->size > 0)
/* GOT[0] is the address of .dynamic (or 0 when there is none);
   GOT[1] and GOT[2] are reserved for the dynamic linker.  */
17441 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17443 bfd_put_32 (output_bfd,
17444 sdyn->output_section->vma + sdyn->output_offset,
17446 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17447 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17450 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17453 /* At the very end of the .rofixup section is a pointer to the GOT. */
17454 if (htab->fdpic_p && htab->srofixup != NULL)
17456 struct elf_link_hash_entry *hgot = htab->root.hgot;
17458 bfd_vma got_value = hgot->root.u.def.value
17459 + hgot->root.u.def.section->output_section->vma
17460 + hgot->root.u.def.section->output_offset;
17462 arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);
17464 /* Make sure we allocated and generated the same number of fixups. */
17465 BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
/* Initialise the ELF file header for an ARM output file: OS/ABI bytes,
   BE8 and float-ABI e_flags, and PF_* flags for pure-code segments.
   NOTE(review): listing is elided; braces and a few lines (e.g. the
   `continue`/`break` of the segment scan) are missing.  */
17472 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17474 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17475 struct elf32_arm_link_hash_table *globals;
17476 struct elf_segment_map *m;
17478 if (!_bfd_elf_init_file_header (abfd, link_info))
17481 i_ehdrp = elf_elfheader (abfd);
/* Pre-EABI objects are stamped with the legacy ARM OS/ABI value.  */
17483 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17484 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17485 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17489 globals = elf32_arm_hash_table (link_info);
17490 if (globals != NULL && globals->byteswap_code)
17491 i_ehdrp->e_flags |= EF_ARM_BE8;
17493 if (globals->fdpic_p)
17494 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
/* For EABI v5 executables/shared objects, record the VFP argument
   passing convention from the build attributes in e_flags.  */
17497 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17498 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17500 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17501 if (abi == AEABI_VFP_args_vfp)
17502 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17504 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17507 /* Scan segment to set p_flags attribute if it contains only sections with
17508 SHF_ARM_PURECODE flag. */
17509 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17515 for (j = 0; j < m->count; j++)
17517 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17523 m->p_flags_valid = 1;
/* Classify a dynamic relocation for the benefit of the generic ELF
   code's reloc-sorting (relative relocs first, PLT relocs last).  */
17529 static enum elf_reloc_type_class
17530 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17531 const asection *rel_sec ATTRIBUTE_UNUSED,
17532 const Elf_Internal_Rela *rela)
17534 switch ((int) ELF32_R_TYPE (rela->r_info))
17536 case R_ARM_RELATIVE:
17537 return reloc_class_relative;
17538 case R_ARM_JUMP_SLOT:
17539 return reloc_class_plt;
/* NOTE(review): the `case R_ARM_COPY:` label (original line 17540)
   appears to have been dropped from this listing — restore it before
   compiling.  */
17541 return reloc_class_copy;
17542 case R_ARM_IRELATIVE:
17543 return reloc_class_ifunc;
/* Presumably the elided `default:` label precedes this return.  */
17545 return reloc_class_normal;
/* Common final-write hook: refresh the ARM architecture note section
   (if present) before the file is written out.  */
17550 arm_final_write_processing (bfd *abfd)
17552 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
/* ELF backend final-write hook: do the ARM-specific note update, then
   delegate to the generic ELF final write processing.  */
17556 elf32_arm_final_write_processing (bfd *abfd)
17558 arm_final_write_processing (abfd);
17559 return _bfd_elf_final_write_processing (abfd);
17562 /* Return TRUE if this is an unwinding table entry. */
/* Matches both ".ARM.exidx*" and the linkonce unwind section prefix.  */
17565 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17567 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17568 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once))
17572 /* Set the type and flags for an ARM section. We do this by
17573 the section name, which is a hack, but ought to work. */
17576 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17580 name = bfd_section_name (sec);
/* Unwind index tables get the dedicated SHT_ARM_EXIDX type and must be
   ordered relative to the text they describe (SHF_LINK_ORDER).  */
17582 if (is_arm_elf_unwind_section_name (abfd, name))
17584 hdr->sh_type = SHT_ARM_EXIDX;
17585 hdr->sh_flags |= SHF_LINK_ORDER;
/* Execute-only sections carry the SHF_ARM_PURECODE section flag.  */
17588 if (sec->flags & SEC_ELF_PURECODE)
17589 hdr->sh_flags |= SHF_ARM_PURECODE;
17594 /* Handle an ARM specific section when reading an object file. This is
17595 called when bfd_section_from_shdr finds a section with an unknown
17599 elf32_arm_section_from_shdr (bfd *abfd,
17600 Elf_Internal_Shdr * hdr,
17604 /* There ought to be a place to keep ELF backend specific flags, but
17605 at the moment there isn't one. We just keep track of the
17606 sections by their name, instead. Fortunately, the ABI gives
17607 names for all the ARM specific sections, so we will probably get
/* Accept the ARM-specific section types; anything else presumably
   falls through to a failure return (elided from this listing).  */
17609 switch (hdr->sh_type)
17611 case SHT_ARM_EXIDX:
17612 case SHT_ARM_PREEMPTMAP:
17613 case SHT_ARM_ATTRIBUTES:
17620 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
/* Return the ARM-specific section data for SEC, or (presumably) NULL —
   the fall-through return is elided here — when SEC is absent, has no
   owner, or its owner is not an ARM ELF bfd.  */
17626 static _arm_elf_section_data *
17627 get_arm_elf_section_data (asection * sec)
17629 if (sec && sec->owner && is_arm_elf (sec->owner))
17630 return elf32_arm_section_data (sec);
17638 struct bfd_link_info *info;
17641 int (*func) (void *, const char *, Elf_Internal_Sym *,
17642 asection *, struct elf_link_hash_entry *);
17643 } output_arch_syminfo;
17645 enum map_symbol_type
17653 /* Output a single mapping symbol. */
/* TYPE indexes NAMES: 0 = "$a" (ARM code), 1 = "$t" (Thumb code),
   2 = "$d" (data), per the AAELF mapping-symbol convention.  */
17656 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17657 enum map_symbol_type type,
17660 static const char *names[3] = {"$a", "$t", "$d"};
17661 Elf_Internal_Sym sym;
/* Symbol value is OFFSET translated into the output section.  */
17663 sym.st_value = osi->sec->output_section->vma
17664 + osi->sec->output_offset
17668 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17669 sym.st_shndx = osi->sec_shndx;
17670 sym.st_target_internal = 0;
/* Also record the map entry on the section itself (second character of
   the name — 'a'/'t'/'d' — is the map type).  */
17671 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17672 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17675 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17676 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
/* NOTE(review): listing is elided (braces/returns missing); the layout
   offsets below are per PLT flavour — VxWorks, NaCl, FDPIC, Thumb-only,
   and the default ARM PLT.  */
17679 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17680 bfd_boolean is_iplt_entry_p,
17681 union gotplt_union *root_plt,
17682 struct arm_plt_info *arm_plt)
17684 struct elf32_arm_link_hash_table *htab;
17685 bfd_vma addr, plt_header_size;
/* No PLT entry was allocated for this symbol — nothing to map.  */
17687 if (root_plt->offset == (bfd_vma) -1)
17690 htab = elf32_arm_hash_table (osi->info);
17694 if (is_iplt_entry_p)
17696 osi->sec = htab->root.iplt;
17697 plt_header_size = 0;
17701 osi->sec = htab->root.splt;
17702 plt_header_size = htab->plt_header_size;
17704 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
17705 (osi->info->output_bfd, osi->sec->output_section));
/* Clear the Thumb bit to get the entry's real start address.  */
17707 addr = root_plt->offset & -2;
17708 if (htab->root.target_os == is_vxworks)
17710 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17712 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
17714 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
17716 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
17719 else if (htab->root.target_os == is_nacl)
17721 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17724 else if (htab->fdpic_p)
17726 enum map_symbol_type type = using_thumb_only(htab)
17730 if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
17731 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17733 if (!elf32_arm_output_map_sym (osi, type, addr))
17735 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
/* The 16-byte-descriptor (large) FDPIC PLT has a second code island.  */
17737 if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
17738 if (!elf32_arm_output_map_sym (osi, type, addr + 24))
17741 else if (using_thumb_only (htab))
17743 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
17748 bfd_boolean thumb_stub_p;
17750 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
/* The 4-byte Thumb-to-ARM stub precedes the entry proper.  */
17753 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17756 #ifdef FOUR_WORD_PLT
17757 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17759 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
17762 /* A three-word PLT with no Thumb thunk contains only Arm code,
17763 so only need to output a mapping symbol for the first PLT entry and
17764 entries with thumb thunks. */
17765 if (thumb_stub_p || addr == plt_header_size)
17767 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17776 /* Output mapping symbols for PLT entries associated with H. */
/* elf_link_hash_traverse callback; INF is the output_arch_syminfo.  */
17779 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17781 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17782 struct elf32_arm_link_hash_entry *eh;
/* Indirect symbols have no PLT of their own.  */
17784 if (h->root.type == bfd_link_hash_indirect)
17787 if (h->root.type == bfd_link_hash_warning)
17788 /* When warning symbols are created, they **replace** the "real"
17789 entry in the hash table, thus we never get to see the real
17790 symbol in a hash traversal. So look at it now. */
17791 h = (struct elf_link_hash_entry *) h->root.u.i.link;
17793 eh = (struct elf32_arm_link_hash_entry *) h;
/* A symbol that always binds locally uses .iplt rather than .plt.  */
17794 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17795 &h->plt, &eh->plt);
17798 /* Bind a veneered symbol to its veneer identified by its hash entry
17799 STUB_ENTRY.  The veneered location thus loses its symbol. */
17802 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17804 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
/* Redirect the symbol's definition to the stub section/offset and give
   it the stub's size.  */
17807 hash->root.root.u.def.section = stub_entry->stub_sec;
17808 hash->root.root.u.def.value = stub_entry->stub_offset;
17809 hash->root.size = stub_entry->stub_size;
17812 /* Output a single local symbol for a generated stub. */
/* Emits a local STT_FUNC symbol named NAME at OFFSET within the current
   stub section (osi->sec), with the given SIZE.  */
17815 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17816 bfd_vma offset, bfd_vma size)
17818 Elf_Internal_Sym sym;
17820 sym.st_value = osi->sec->output_section->vma
17821 + osi->sec->output_offset
17823 sym.st_size = size;
17825 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17826 sym.st_shndx = osi->sec_shndx;
17827 sym.st_target_internal = 0;
17828 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
/* bfd_hash_traverse callback: emit the stub's own symbol and the $a/$t/$d
   mapping symbols for one long-branch stub.  NOTE(review): listing is
   elided — several case labels and the per-insn size accounting are
   missing from view.  */
17832 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
17835 struct elf32_arm_stub_hash_entry *stub_entry;
17836 asection *stub_sec;
17839 output_arch_syminfo *osi;
17840 const insn_sequence *template_sequence;
17841 enum stub_insn_type prev_type;
17844 enum map_symbol_type sym_type;
17846 /* Massage our args to the form they really have. */
17847 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
17848 osi = (output_arch_syminfo *) in_arg;
17850 stub_sec = stub_entry->stub_sec;
17852 /* Ensure this stub is attached to the current section being
17854 if (stub_sec != osi->sec)
17857 addr = (bfd_vma) stub_entry->stub_offset;
17858 template_sequence = stub_entry->stub_template;
17860 if (arm_stub_sym_claimed (stub_entry->stub_type))
17861 arm_stub_claim_sym (stub_entry);
17864 stub_name = stub_entry->output_name;
/* Presumably ARM vs Thumb first-insn cases: Thumb stubs get the low
   address bit set on their symbol.  */
17865 switch (template_sequence[0].type)
17868 if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
17869 stub_entry->stub_size))
17874 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
17875 stub_entry->stub_size))
/* Walk the template and emit a mapping symbol at each ARM/Thumb/data
   state transition.  */
17884 prev_type = DATA_TYPE;
17886 for (i = 0; i < stub_entry->stub_template_size; i++)
17888 switch (template_sequence[i].type)
17891 sym_type = ARM_MAP_ARM;
17896 sym_type = ARM_MAP_THUMB;
17900 sym_type = ARM_MAP_DATA;
17908 if (template_sequence[i].type != prev_type)
17910 prev_type = template_sequence[i].type;
17911 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
17915 switch (template_sequence[i].type)
17939 /* Output mapping symbols for linker generated sections,
17940 and for those data-only sections that do not have a
/* NOTE(review): elided listing — braces, `return` lines and some loop
   conditions are missing (embedded line numbers jump).  */
17944 elf32_arm_output_arch_local_syms (bfd *output_bfd,
17945 struct bfd_link_info *info,
17947 int (*func) (void *, const char *,
17948 Elf_Internal_Sym *,
17950 struct elf_link_hash_entry *))
17952 output_arch_syminfo osi;
17953 struct elf32_arm_link_hash_table *htab;
17955 bfd_size_type size;
17958 htab = elf32_arm_hash_table (info);
17962 check_use_blx (htab);
17964 osi.flaginfo = flaginfo;
17968 /* Add a $d mapping symbol to data-only sections that
17969 don't have any mapping symbol. This may result in (harmless) redundant
17970 mapping symbols. */
17971 for (input_bfd = info->input_bfds;
17973 input_bfd = input_bfd->link.next)
17975 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
17976 for (osi.sec = input_bfd->sections;
17978 osi.sec = osi.sec->next)
17980 if (osi.sec->output_section != NULL
17981 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
17983 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
17984 == SEC_HAS_CONTENTS
17985 && get_arm_elf_section_data (osi.sec) != NULL
17986 && get_arm_elf_section_data (osi.sec)->mapcount == 0
17987 && osi.sec->size > 0
17988 && (osi.sec->flags & SEC_EXCLUDE) == 0)
17990 osi.sec_shndx = _bfd_elf_section_from_bfd_section
17991 (output_bfd, osi.sec->output_section);
17992 if (osi.sec_shndx != (int)SHN_BAD)
17993 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
17998 /* ARM->Thumb glue. */
17999 if (htab->arm_glue_size > 0)
18001 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18002 ARM2THUMB_GLUE_SECTION_NAME);
18004 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18005 (output_bfd, osi.sec->output_section);
/* Glue veneer size depends on flavour: PIC, BLX-capable, or static.  */
18006 if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18007 || htab->pic_veneer)
18008 size = ARM2THUMB_PIC_GLUE_SIZE;
18009 else if (htab->use_blx)
18010 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18012 size = ARM2THUMB_STATIC_GLUE_SIZE;
/* Each veneer: $a at the code, $d at the trailing address word.  */
18014 for (offset = 0; offset < htab->arm_glue_size; offset += size)
18016 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18017 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18021 /* Thumb->ARM glue. */
18022 if (htab->thumb_glue_size > 0)
18024 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18025 THUMB2ARM_GLUE_SECTION_NAME);
18027 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18028 (output_bfd, osi.sec->output_section);
18029 size = THUMB2ARM_GLUE_SIZE;
/* Each veneer starts with 4 bytes of Thumb code, then ARM code.  */
18031 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18033 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18034 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18038 /* ARMv4 BX veneers. */
18039 if (htab->bx_glue_size > 0)
18041 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18042 ARM_BX_GLUE_SECTION_NAME);
18044 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18045 (output_bfd, osi.sec->output_section);
18047 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18050 /* Long calls stubs. */
18051 if (htab->stub_bfd && htab->stub_bfd->sections)
18053 asection* stub_sec;
18055 for (stub_sec = htab->stub_bfd->sections;
18057 stub_sec = stub_sec->next)
18059 /* Ignore non-stub sections. */
18060 if (!strstr (stub_sec->name, STUB_SUFFIX))
18063 osi.sec = stub_sec;
18065 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18066 (output_bfd, osi.sec->output_section);
18068 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18072 /* Finally, output mapping symbols for the PLT. */
18073 if (htab->root.splt && htab->root.splt->size > 0)
18075 osi.sec = htab->root.splt;
18076 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18077 (output_bfd, osi.sec->output_section));
18079 /* Output mapping symbols for the plt header. */
18080 if (htab->root.target_os == is_vxworks)
18082 /* VxWorks shared libraries have no PLT header. */
18083 if (!bfd_link_pic (info))
18085 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18087 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18091 else if (htab->root.target_os == is_nacl)
18093 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18096 else if (using_thumb_only (htab) && !htab->fdpic_p)
18098 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18100 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18102 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18105 else if (!htab->fdpic_p)
18107 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18109 #ifndef FOUR_WORD_PLT
18110 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18115 if (htab->root.target_os == is_nacl
18117 && htab->root.iplt->size > 0)
18119 /* NaCl uses a special first entry in .iplt too. */
18120 osi.sec = htab->root.iplt;
18121 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18122 (output_bfd, osi.sec->output_section));
18123 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18126 if ((htab->root.splt && htab->root.splt->size > 0)
18127 || (htab->root.iplt && htab->root.iplt->size > 0))
/* Map every global PLT entry, then every local .iplt entry.  */
18129 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18130 for (input_bfd = info->input_bfds;
18132 input_bfd = input_bfd->link.next)
18134 struct arm_local_iplt_info **local_iplt;
18135 unsigned int i, num_syms;
18137 local_iplt = elf32_arm_local_iplt (input_bfd);
18138 if (local_iplt != NULL)
18140 num_syms = elf_symtab_hdr (input_bfd).sh_info;
18141 for (i = 0; i < num_syms; i++)
18142 if (local_iplt[i] != NULL
18143 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
18144 &local_iplt[i]->root,
18145 &local_iplt[i]->arm))
18150 if (htab->root.tlsdesc_plt != 0)
18152 /* Mapping symbols for the lazy tls trampoline. */
18153 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM,
18154 htab->root.tlsdesc_plt))
18157 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18158 htab->root.tlsdesc_plt + 24))
18161 if (htab->tls_trampoline != 0)
18163 /* Mapping symbols for the tls trampoline. */
18164 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18166 #ifdef FOUR_WORD_PLT
18167 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18168 htab->tls_trampoline + 12))
18176 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18177 the import library. All SYMCOUNT symbols of ABFD can be examined
18178 from their pointers in SYMS. Pointers of symbols to keep should be
18179 stored continuously at the beginning of that array.
18181 Returns the number of symbols to keep. */
18183 static unsigned int
18184 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18185 struct bfd_link_info *info,
18186 asymbol **syms, long symcount)
18190 long src_count, dst_count = 0;
18191 struct elf32_arm_link_hash_table *htab;
18193 htab = elf32_arm_hash_table (info);
18194 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18198 cmse_name = (char *) bfd_malloc (maxnamelen);
18199 BFD_ASSERT (cmse_name);
18201 for (src_count = 0; src_count < symcount; src_count++)
18203 struct elf32_arm_link_hash_entry *cmse_hash;
18209 sym = syms[src_count];
18210 flags = sym->flags;
18211 name = (char *) bfd_asymbol_name (sym);
18213 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18215 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18218 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18219 if (namelen > maxnamelen)
18221 cmse_name = (char *)
18222 bfd_realloc (cmse_name, namelen);
18223 maxnamelen = namelen;
18225 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18226 cmse_hash = (struct elf32_arm_link_hash_entry *)
18227 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18230 || (cmse_hash->root.root.type != bfd_link_hash_defined
18231 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18232 || cmse_hash->root.type != STT_FUNC)
18235 syms[dst_count++] = sym;
18239 syms[dst_count] = NULL;
18244 /* Filter symbols of ABFD to include in the import library. All
18245 SYMCOUNT symbols of ABFD can be examined from their pointers in
18246 SYMS. Pointers of symbols to keep should be stored continuously at
18247 the beginning of that array.
18249 Returns the number of symbols to keep. */
18251 static unsigned int
18252 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18253 struct bfd_link_info *info,
18254 asymbol **syms, long symcount)
18256 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18258 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18259 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18260 library to be a relocatable object file. */
18261 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18262 if (globals->cmse_implib)
18263 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18265 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18268 /* Allocate target specific section data. */
18271 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18273 if (!sec->used_by_bfd)
18275 _arm_elf_section_data *sdata;
18276 size_t amt = sizeof (*sdata);
18278 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18281 sec->used_by_bfd = sdata;
18284 return _bfd_elf_new_section_hook (abfd, sec);
18288 /* Used to order a list of mapping symbols by address. */
18291 elf32_arm_compare_mapping (const void * a, const void * b)
18293 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18294 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18296 if (amap->vma > bmap->vma)
18298 else if (amap->vma < bmap->vma)
18300 else if (amap->type > bmap->type)
18301 /* Ensure results do not depend on the host qsort for objects with
18302 multiple mapping symbols at the same address by sorting on type
18305 else if (amap->type < bmap->type)
18311 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18313 static unsigned long
18314 offset_prel31 (unsigned long addr, bfd_vma offset)
18316 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18319 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18323 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18325 unsigned long first_word = bfd_get_32 (output_bfd, from);
18326 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18328 /* High bit of first word is supposed to be zero. */
18329 if ((first_word & 0x80000000ul) == 0)
18330 first_word = offset_prel31 (first_word, offset);
18332 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18333 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18334 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18335 second_word = offset_prel31 (second_word, offset);
18337 bfd_put_32 (output_bfd, first_word, to);
18338 bfd_put_32 (output_bfd, second_word, to + 4);
/* Data for make_branch_to_a8_stub().  Passed as the traversal argument
   when walking the stub hash table for one output section.  */

struct a8_branch_to_stub_data
  asection *writing_section;	/* The section currently being written.  */
  bfd_byte *contents;		/* That section's contents buffer, into
				   which branches are patched.  */
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.
   NOTE(review): this copy is truncated in several places (storage
   class/return type, the in_arg parameter, switch braces/breaks and
   return statements are missing) -- verify against upstream.  */

make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,

  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;

  /* Recover the typed views of the generic hash entry and the
     traversal argument.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only handle Cortex-A8 stubs targeting the section being written;
     stub types below arm_stub_a8_veneer_lwm are not A8 veneers.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
    + stub_entry->target_section->output_offset
    + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
    + stub_entry->stub_sec->output_offset
    + stub_entry->stub_offset;

  /* BLX targets ARM state, so the computed offset is relative to the
     word-aligned branch location.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  /* Thumb branch offsets are relative to PC = insn + 4.  */
  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);

  /* Select the 32-bit Thumb branch encoding for this veneer kind.  */
  switch (stub_entry->stub_type)
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;	/* B.W, encoding T4.  */

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;	/* BLX, encoding T2.  */

    case arm_stub_a8_veneer_bl:
      unsigned int i1, j1, i2, j2, s;

      branch_insn = 0xf000d000;	/* BL, encoding T1.  */

      /* BL reach is +/- 16MB.  */
      if (branch_offset < -16777216 || branch_offset > 16777214)
	  /* There's not much we can do apart from complain if this
	     happens.  */
	  _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				"of range (input file too large)"), abfd);

      /* i1 = not(j1 eor s), so:
	 j1 = (not i1) eor s.  */
      branch_insn |= (branch_offset >> 1) & 0x7ff;		/* imm11.  */
      branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;	/* imm10.  */
      i2 = (branch_offset >> 22) & 1;
      i1 = (branch_offset >> 23) & 1;
      s = (branch_offset >> 24) & 1;
      /* NOTE(review): j1/j2 are used below with no visible assignment
	 in this copy -- their computation from i1/i2/s appears
	 truncated; verify against upstream.  */
      branch_insn |= j2 << 11;
      branch_insn |= j1 << 13;
      branch_insn |= s << 26;

  /* Write the branch as two halfwords over the veneered insn.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18458 /* Beginning of stm32l4xx work-around. */
18460 /* Functions encoding instructions necessary for the emission of the
18461 fix-stm32l4xx-629360.
18462 Encoding is extracted from the
18463 ARM (C) Architecture Reference Manual
18464 ARMv7-A and ARMv7-R edition
18465 ARM DDI 0406C.b (ID072512). */
18467 static inline bfd_vma
18468 create_instruction_branch_absolute (int branch_offset)
18470 /* A8.8.18 B (A8-334)
18471 B target_address (Encoding T4). */
18472 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18473 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18474 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18476 int s = ((branch_offset & 0x1000000) >> 24);
18477 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18478 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18480 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18481 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18483 bfd_vma patched_inst = 0xf0009000
18485 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18486 | j1 << 13 /* J1. */
18487 | j2 << 11 /* J2. */
18488 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18490 return patched_inst;
18493 static inline bfd_vma
18494 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18496 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18497 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18498 bfd_vma patched_inst = 0xe8900000
18499 | (/*W=*/wback << 21)
18501 | (reg_mask & 0x0000ffff);
18503 return patched_inst;
18506 static inline bfd_vma
18507 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18509 /* A8.8.60 LDMDB/LDMEA (A8-402)
18510 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18511 bfd_vma patched_inst = 0xe9100000
18512 | (/*W=*/wback << 21)
18514 | (reg_mask & 0x0000ffff);
18516 return patched_inst;
18519 static inline bfd_vma
18520 create_instruction_mov (int target_reg, int source_reg)
18522 /* A8.8.103 MOV (register) (A8-486)
18523 MOV Rd, Rm (Encoding T1). */
18524 bfd_vma patched_inst = 0x4600
18525 | (target_reg & 0x7)
18526 | ((target_reg & 0x8) >> 3) << 7
18527 | (source_reg << 3);
18529 return patched_inst;
18532 static inline bfd_vma
18533 create_instruction_sub (int target_reg, int source_reg, int value)
18535 /* A8.8.221 SUB (immediate) (A8-708)
18536 SUB Rd, Rn, #value (Encoding T3). */
18537 bfd_vma patched_inst = 0xf1a00000
18538 | (target_reg << 8)
18539 | (source_reg << 16)
18541 | ((value & 0x800) >> 11) << 26
18542 | ((value & 0x700) >> 8) << 12
18545 return patched_inst;
18548 static inline bfd_vma
18549 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18552 /* A8.8.332 VLDM (A8-922)
18553 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18554 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18555 | (/*W=*/wback << 21)
18557 | (num_words & 0x000000ff)
18558 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18559 | (first_reg & 0x00000001) << 22;
18561 return patched_inst;
18564 static inline bfd_vma
18565 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18568 /* A8.8.332 VLDM (A8-922)
18569 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18570 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18572 | (num_words & 0x000000ff)
18573 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18574 | (first_reg & 0x00000001) << 22;
18576 return patched_inst;
18579 static inline bfd_vma
18580 create_instruction_udf_w (int value)
18582 /* A8.8.247 UDF (A8-758)
18583 Undefined (Encoding T2). */
18584 bfd_vma patched_inst = 0xf7f0a000
18585 | (value & 0x00000fff)
18586 | (value & 0x000f0000) << 16;
18588 return patched_inst;
18591 static inline bfd_vma
18592 create_instruction_udf (int value)
18594 /* A8.8.247 UDF (A8-758)
18595 Undefined (Encoding T1). */
18596 bfd_vma patched_inst = 0xde00
18599 return patched_inst;
18602 /* Functions writing an instruction in memory, returning the next
18603 memory position to write to. */
18605 static inline bfd_byte *
18606 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18607 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18609 put_thumb2_insn (htab, output_bfd, insn, pt);
18613 static inline bfd_byte *
18614 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18615 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18617 put_thumb_insn (htab, output_bfd, insn, pt);
18621 /* Function filling up a region in memory with T1 and T2 UDFs taking
18622 care of alignment. */
18625 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18627 const bfd_byte * const base_stub_contents,
18628 bfd_byte * const from_stub_contents,
18629 const bfd_byte * const end_stub_contents)
18631 bfd_byte *current_stub_contents = from_stub_contents;
18633 /* Fill the remaining of the stub with deterministic contents : UDF
18635 Check if realignment is needed on modulo 4 frontier using T1, to
18637 if ((current_stub_contents < end_stub_contents)
18638 && !((current_stub_contents - base_stub_contents) % 2)
18639 && ((current_stub_contents - base_stub_contents) % 4))
18640 current_stub_contents =
18641 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18642 create_instruction_udf (0));
18644 for (; current_stub_contents < end_stub_contents;)
18645 current_stub_contents =
18646 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18647 create_instruction_udf_w (0));
18649 return current_stub_contents;
/* Functions writing the stream of instructions equivalent to the
   derived sequence for ldmia, ldmdb, vldm respectively.
   NOTE(review): this copy is truncated in places (missing return
   type, some call arguments and braces) -- verify against upstream.  */

stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
  /* Decode the fields of the original LDMIA (encoding T2).  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     erratum; they are simply copied followed by a branch back.  */
  if (nb_registers <= 8)
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

  /* Sanity-check the architectural constraints of LDM (T2).  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */

      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

    else /* if (!wback).  */
      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* Save the base register in Ri so the final no-writeback LDM
	     can restore it.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
/* Emit, at BASE_STUB_CONTENTS, the veneer replacing the LDMDB insn
   INITIAL_INSN found at INITIAL_INSN_ADDR, splitting wide register
   lists to avoid the STM32L4xx erratum.
   NOTE(review): the storage class/return type and the output_bfd
   parameter are truncated in this copy -- verify against upstream.  */

stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
  /* Decode the fields of the original LDMDB (encoding T1).  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     erratum; they are simply copied followed by a branch back.  */
  if (nb_registers <= 8)
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

  /* Sanity-check the architectural constraints of LDMDB (T1).  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function: one arm of the if/else-if chain per
     combination of writeback / PC-restore / Rn-in-list.  */
  if (!wback && !restore_pc && !restore_rn)
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* Copy the base register to Ri; the second, no-writeback LDMDB
	 restores it.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

  else if (wback && !restore_pc && !restore_rn)
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

  else if (!wback && restore_pc && !restore_rn)
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* Compute the lowest loaded address, then load upward with
	 LDMIAs (equivalent to the original LDMDB).  */
      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

  else if (wback && restore_pc && !restore_rn)
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* Keep the (already written-back) base in Ri for the loads.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

  else if (!wback && !restore_pc && restore_rn)
      /* If Rn is not in the low list, mirror it into a spare Ri so
	 the final no-writeback LDMDB still restores Rn correctly.  */
      if (!(insn_low_registers & (1 << rn)))
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

  else if (!wback && restore_pc && restore_rn)
      /* PC is restored by the load, so no trailing branch is needed.  */
      if (!(insn_high_registers & (1 << rn)))
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

  else if (wback && restore_rn)
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
/* Emit, at BASE_STUB_CONTENTS, the veneer replacing the VLDM insn
   INITIAL_INSN found at INITIAL_INSN_ADDR, splitting wide transfer
   lists into chunks of at most 8 words to avoid the STM32L4xx erratum.
   NOTE(review): the storage class/return type and the output_bfd
   parameter are truncated in this copy -- verify against upstream.  */

stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
  /* Number of 32-bit words transferred (imm8 field).  */
  int num_words = initial_insn & 0xff;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     erratum; they are simply copied followed by a branch back.  */
  if (num_words <= 8)
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Decode the addressing mode and register fields of the VLDM.  */
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int) initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		   && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	      /* Last chunk transfers the remainder (<= 8 words).  */
	      new_insn = create_instruction_vldmia
		chunks - (chunk + 1) ?
		8 : num_words - chunk * 8,
		first_reg + chunk * 8);
	  else if (is_db_bang)
	      new_insn = create_instruction_vldmdb
		chunks - (chunk + 1) ?
		8 : num_words - chunk * 8,
		first_reg + chunk * 8);

	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,

      /* Only this case requires the base register compensation
	 subtract (the split used writeback the original didn't
	 have).  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19167 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19169 const insn32 wrong_insn,
19170 const bfd_byte *const wrong_insn_addr,
19171 bfd_byte *const stub_contents)
19173 if (is_thumb2_ldmia (wrong_insn))
19174 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19175 wrong_insn, wrong_insn_addr,
19177 else if (is_thumb2_ldmdb (wrong_insn))
19178 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19179 wrong_insn, wrong_insn_addr,
19181 else if (is_thumb2_vldm (wrong_insn))
19182 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19183 wrong_insn, wrong_insn_addr,
19187 /* End of stm32l4xx work-around. */
19190 /* Do code byteswapping. Return FALSE afterwards so that the section is
19191 written out as normal. */
19194 elf32_arm_write_section (bfd *output_bfd,
19195 struct bfd_link_info *link_info,
19197 bfd_byte *contents)
19199 unsigned int mapcount, errcount;
19200 _arm_elf_section_data *arm_data;
19201 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19202 elf32_arm_section_map *map;
19203 elf32_vfp11_erratum_list *errnode;
19204 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19207 bfd_vma offset = sec->output_section->vma + sec->output_offset;
19211 if (globals == NULL)
19214 /* If this section has not been allocated an _arm_elf_section_data
19215 structure then we cannot record anything. */
19216 arm_data = get_arm_elf_section_data (sec);
19217 if (arm_data == NULL)
19220 mapcount = arm_data->mapcount;
19221 map = arm_data->map;
19222 errcount = arm_data->erratumcount;
19226 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19228 for (errnode = arm_data->erratumlist; errnode != 0;
19229 errnode = errnode->next)
19231 bfd_vma target = errnode->vma - offset;
19233 switch (errnode->type)
19235 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19237 bfd_vma branch_to_veneer;
19238 /* Original condition code of instruction, plus bit mask for
19239 ARM B instruction. */
19240 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19243 /* The instruction is before the label. */
19246 /* Above offset included in -4 below. */
19247 branch_to_veneer = errnode->u.b.veneer->vma
19248 - errnode->vma - 4;
19250 if ((signed) branch_to_veneer < -(1 << 25)
19251 || (signed) branch_to_veneer >= (1 << 25))
19252 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19253 "range"), output_bfd);
19255 insn |= (branch_to_veneer >> 2) & 0xffffff;
19256 contents[endianflip ^ target] = insn & 0xff;
19257 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19258 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19259 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19263 case VFP11_ERRATUM_ARM_VENEER:
19265 bfd_vma branch_from_veneer;
19268 /* Take size of veneer into account. */
19269 branch_from_veneer = errnode->u.v.branch->vma
19270 - errnode->vma - 12;
19272 if ((signed) branch_from_veneer < -(1 << 25)
19273 || (signed) branch_from_veneer >= (1 << 25))
19274 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19275 "range"), output_bfd);
19277 /* Original instruction. */
19278 insn = errnode->u.v.branch->u.b.vfp_insn;
19279 contents[endianflip ^ target] = insn & 0xff;
19280 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19281 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19282 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19284 /* Branch back to insn after original insn. */
19285 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19286 contents[endianflip ^ (target + 4)] = insn & 0xff;
19287 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19288 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19289 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19299 if (arm_data->stm32l4xx_erratumcount != 0)
19301 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19302 stm32l4xx_errnode != 0;
19303 stm32l4xx_errnode = stm32l4xx_errnode->next)
19305 bfd_vma target = stm32l4xx_errnode->vma - offset;
19307 switch (stm32l4xx_errnode->type)
19309 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19312 bfd_vma branch_to_veneer =
19313 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19315 if ((signed) branch_to_veneer < -(1 << 24)
19316 || (signed) branch_to_veneer >= (1 << 24))
19318 bfd_vma out_of_range =
19319 ((signed) branch_to_veneer < -(1 << 24)) ?
19320 - branch_to_veneer - (1 << 24) :
19321 ((signed) branch_to_veneer >= (1 << 24)) ?
19322 branch_to_veneer - (1 << 24) : 0;
19325 (_("%pB(%#" PRIx64 "): error: "
19326 "cannot create STM32L4XX veneer; "
19327 "jump out of range by %" PRId64 " bytes; "
19328 "cannot encode branch instruction"),
19330 (uint64_t) (stm32l4xx_errnode->vma - 4),
19331 (int64_t) out_of_range);
19335 insn = create_instruction_branch_absolute
19336 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19338 /* The instruction is before the label. */
19341 put_thumb2_insn (globals, output_bfd,
19342 (bfd_vma) insn, contents + target);
19346 case STM32L4XX_ERRATUM_VENEER:
19349 bfd_byte * veneer_r;
19352 veneer = contents + target;
19354 + stm32l4xx_errnode->u.b.veneer->vma
19355 - stm32l4xx_errnode->vma - 4;
19357 if ((signed) (veneer_r - veneer -
19358 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19359 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19360 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19361 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19362 || (signed) (veneer_r - veneer) >= (1 << 24))
19364 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19365 "veneer"), output_bfd);
19369 /* Original instruction. */
19370 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19372 stm32l4xx_create_replacing_stub
19373 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19383 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19385 arm_unwind_table_edit *edit_node
19386 = arm_data->u.exidx.unwind_edit_list;
19387 /* Now, sec->size is the size of the section we will write. The original
19388 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19389 markers) was sec->rawsize. (This isn't the case if we perform no
19390 edits, then rawsize will be zero and we should use size). */
19391 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19392 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19393 unsigned int in_index, out_index;
19394 bfd_vma add_to_offsets = 0;
19396 if (edited_contents == NULL)
19398 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19402 unsigned int edit_index = edit_node->index;
19404 if (in_index < edit_index && in_index * 8 < input_size)
19406 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19407 contents + in_index * 8, add_to_offsets);
19411 else if (in_index == edit_index
19412 || (in_index * 8 >= input_size
19413 && edit_index == UINT_MAX))
19415 switch (edit_node->type)
19417 case DELETE_EXIDX_ENTRY:
19419 add_to_offsets += 8;
19422 case INSERT_EXIDX_CANTUNWIND_AT_END:
19424 asection *text_sec = edit_node->linked_section;
19425 bfd_vma text_offset = text_sec->output_section->vma
19426 + text_sec->output_offset
19428 bfd_vma exidx_offset = offset + out_index * 8;
19429 unsigned long prel31_offset;
19431 /* Note: this is meant to be equivalent to an
19432 R_ARM_PREL31 relocation. These synthetic
19433 EXIDX_CANTUNWIND markers are not relocated by the
19434 usual BFD method. */
19435 prel31_offset = (text_offset - exidx_offset)
19437 if (bfd_link_relocatable (link_info))
19439 /* Here relocation for new EXIDX_CANTUNWIND is
19440 created, so there is no need to
19441 adjust offset by hand. */
19442 prel31_offset = text_sec->output_offset
19446 /* First address we can't unwind. */
19447 bfd_put_32 (output_bfd, prel31_offset,
19448 &edited_contents[out_index * 8]);
19450 /* Code for EXIDX_CANTUNWIND. */
19451 bfd_put_32 (output_bfd, 0x1,
19452 &edited_contents[out_index * 8 + 4]);
19455 add_to_offsets -= 8;
19460 edit_node = edit_node->next;
19465 /* No more edits, copy remaining entries verbatim. */
19466 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19467 contents + in_index * 8, add_to_offsets);
19473 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19474 bfd_set_section_contents (output_bfd, sec->output_section,
19476 (file_ptr) sec->output_offset, sec->size);
19481 /* Fix code to point to Cortex-A8 erratum stubs. */
19482 if (globals->fix_cortex_a8)
19484 struct a8_branch_to_stub_data data;
19486 data.writing_section = sec;
19487 data.contents = contents;
19489 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19496 if (globals->byteswap_code)
19498 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19501 for (i = 0; i < mapcount; i++)
19503 if (i == mapcount - 1)
19506 end = map[i + 1].vma;
19508 switch (map[i].type)
19511 /* Byte swap code words. */
19512 while (ptr + 3 < end)
19514 tmp = contents[ptr];
19515 contents[ptr] = contents[ptr + 3];
19516 contents[ptr + 3] = tmp;
19517 tmp = contents[ptr + 1];
19518 contents[ptr + 1] = contents[ptr + 2];
19519 contents[ptr + 2] = tmp;
19525 /* Byte swap code halfwords. */
19526 while (ptr + 1 < end)
19528 tmp = contents[ptr];
19529 contents[ptr] = contents[ptr + 1];
19530 contents[ptr + 1] = tmp;
19536 /* Leave data alone. */
19544 arm_data->mapcount = -1;
19545 arm_data->mapsize = 0;
19546 arm_data->map = NULL;
19551 /* Mangle thumb function symbols as we read them in. */
19554 elf32_arm_swap_symbol_in (bfd * abfd,
19557 Elf_Internal_Sym *dst)
19559 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19561 dst->st_target_internal = 0;
19563 /* New EABI objects mark thumb function symbols by setting the low bit of
19565 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19566 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19568 if (dst->st_value & 1)
19570 dst->st_value &= ~(bfd_vma) 1;
19571 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19572 ST_BRANCH_TO_THUMB);
19575 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19577 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19579 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19580 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19582 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19583 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19585 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19591 /* Mangle thumb function symbols as we write them out. */
19594 elf32_arm_swap_symbol_out (bfd *abfd,
19595 const Elf_Internal_Sym *src,
19599 Elf_Internal_Sym newsym;
19601 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19602 of the address set, as per the new EABI. We do this unconditionally
19603 because objcopy does not set the elf header flags until after
19604 it writes out the symbol table. */
19605 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19608 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19609 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19610 if (newsym.st_shndx != SHN_UNDEF)
19612 /* Do this only for defined symbols. At link type, the static
19613 linker will simulate the work of dynamic linker of resolving
19614 symbols and will carry over the thumbness of found symbols to
19615 the output symbol table. It's not clear how it happens, but
19616 the thumbness of undefined symbols can well be different at
19617 runtime, and writing '1' for them will be confusing for users
19618 and possibly for dynamic linker itself.
19620 newsym.st_value |= 1;
19625 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19628 /* Add the PT_ARM_EXIDX program header. */
19631 elf32_arm_modify_segment_map (bfd *abfd,
19632 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19634 struct elf_segment_map *m;
19637 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19638 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19640 /* If there is already a PT_ARM_EXIDX header, then we do not
19641 want to add another one. This situation arises when running
19642 "strip"; the input binary already has the header. */
19643 m = elf_seg_map (abfd);
19644 while (m && m->p_type != PT_ARM_EXIDX)
19648 m = (struct elf_segment_map *)
19649 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19652 m->p_type = PT_ARM_EXIDX;
19654 m->sections[0] = sec;
19656 m->next = elf_seg_map (abfd);
19657 elf_seg_map (abfd) = m;
19664 /* We may add a PT_ARM_EXIDX program header. */
19667 elf32_arm_additional_program_headers (bfd *abfd,
19668 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19672 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19673 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19679 /* Hook called by the linker routine which adds symbols from an object
19683 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19684 Elf_Internal_Sym *sym, const char **namep,
19685 flagword *flagsp, asection **secp, bfd_vma *valp)
19687 if (elf32_arm_hash_table (info) == NULL)
19690 if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
19691 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19692 flagsp, secp, valp))
19698 /* We use this to override swap_symbol_in and swap_symbol_out. */
19699 const struct elf_size_info elf32_arm_size_info =
19701 sizeof (Elf32_External_Ehdr),
19702 sizeof (Elf32_External_Phdr),
19703 sizeof (Elf32_External_Shdr),
19704 sizeof (Elf32_External_Rel),
19705 sizeof (Elf32_External_Rela),
19706 sizeof (Elf32_External_Sym),
19707 sizeof (Elf32_External_Dyn),
19708 sizeof (Elf_External_Note),
19712 ELFCLASS32, EV_CURRENT,
19713 bfd_elf32_write_out_phdrs,
19714 bfd_elf32_write_shdrs_and_ehdr,
19715 bfd_elf32_checksum_contents,
19716 bfd_elf32_write_relocs,
19717 elf32_arm_swap_symbol_in,
19718 elf32_arm_swap_symbol_out,
19719 bfd_elf32_slurp_reloc_table,
19720 bfd_elf32_slurp_symbol_table,
19721 bfd_elf32_swap_dyn_in,
19722 bfd_elf32_swap_dyn_out,
19723 bfd_elf32_swap_reloc_in,
19724 bfd_elf32_swap_reloc_out,
19725 bfd_elf32_swap_reloca_in,
19726 bfd_elf32_swap_reloca_out
19730 read_code32 (const bfd *abfd, const bfd_byte *addr)
19732 /* V7 BE8 code is always little endian. */
19733 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19734 return bfd_getl32 (addr);
19736 return bfd_get_32 (abfd, addr);
19740 read_code16 (const bfd *abfd, const bfd_byte *addr)
19742 /* V7 BE8 code is always little endian. */
19743 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19744 return bfd_getl16 (addr);
19746 return bfd_get_16 (abfd, addr);
19749 /* Return size of plt0 entry starting at ADDR
19750 or (bfd_vma) -1 if size can not be determined. */
19753 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19755 bfd_vma first_word;
19758 first_word = read_code32 (abfd, addr);
19760 if (first_word == elf32_arm_plt0_entry[0])
19761 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19762 else if (first_word == elf32_thumb2_plt0_entry[0])
19763 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19765 /* We don't yet handle this PLT format. */
19766 return (bfd_vma) -1;
19771 /* Return size of plt entry starting at offset OFFSET
19772 of plt section located at address START
19773 or (bfd_vma) -1 if size can not be determined. */
19776 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
19778 bfd_vma first_insn;
19779 bfd_vma plt_size = 0;
19780 const bfd_byte *addr = start + offset;
19782 /* PLT entry size if fixed on Thumb-only platforms. */
19783 if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
19784 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
19786 /* Respect Thumb stub if necessary. */
19787 if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
19789 plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
19792 /* Strip immediate from first add. */
19793 first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
19795 #ifdef FOUR_WORD_PLT
19796 if (first_insn == elf32_arm_plt_entry[0])
19797 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
19799 if (first_insn == elf32_arm_plt_entry_long[0])
19800 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
19801 else if (first_insn == elf32_arm_plt_entry_short[0])
19802 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
19805 /* We don't yet handle this PLT format. */
19806 return (bfd_vma) -1;
19811 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
19814 elf32_arm_get_synthetic_symtab (bfd *abfd,
19815 long symcount ATTRIBUTE_UNUSED,
19816 asymbol **syms ATTRIBUTE_UNUSED,
19826 Elf_Internal_Shdr *hdr;
19834 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
19837 if (dynsymcount <= 0)
19840 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
19841 if (relplt == NULL)
19844 hdr = &elf_section_data (relplt)->this_hdr;
19845 if (hdr->sh_link != elf_dynsymtab (abfd)
19846 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
19849 plt = bfd_get_section_by_name (abfd, ".plt");
19853 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
19856 data = plt->contents;
19859 if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
19861 bfd_cache_section_contents((asection *) plt, data);
19864 count = relplt->size / hdr->sh_entsize;
19865 size = count * sizeof (asymbol);
19866 p = relplt->relocation;
19867 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
19869 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
19870 if (p->addend != 0)
19871 size += sizeof ("+0x") - 1 + 8;
19874 s = *ret = (asymbol *) bfd_malloc (size);
19878 offset = elf32_arm_plt0_size (abfd, data);
19879 if (offset == (bfd_vma) -1)
19882 names = (char *) (s + count);
19883 p = relplt->relocation;
19885 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
19889 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
19890 if (plt_size == (bfd_vma) -1)
19893 *s = **p->sym_ptr_ptr;
19894 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
19895 we are defining a symbol, ensure one of them is set. */
19896 if ((s->flags & BSF_LOCAL) == 0)
19897 s->flags |= BSF_GLOBAL;
19898 s->flags |= BSF_SYNTHETIC;
19903 len = strlen ((*p->sym_ptr_ptr)->name);
19904 memcpy (names, (*p->sym_ptr_ptr)->name, len);
19906 if (p->addend != 0)
19910 memcpy (names, "+0x", sizeof ("+0x") - 1);
19911 names += sizeof ("+0x") - 1;
19912 bfd_sprintf_vma (abfd, buf, p->addend);
19913 for (a = buf; *a == '0'; ++a)
19916 memcpy (names, a, len);
19919 memcpy (names, "@plt", sizeof ("@plt"));
19920 names += sizeof ("@plt");
19922 offset += plt_size;
19929 elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
19931 if (hdr->sh_flags & SHF_ARM_PURECODE)
19932 hdr->bfd_section->flags |= SEC_ELF_PURECODE;
19937 elf32_arm_lookup_section_flags (char *flag_name)
19939 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
19940 return SHF_ARM_PURECODE;
19942 return SEC_NO_FLAGS;
19945 static unsigned int
19946 elf32_arm_count_additional_relocs (asection *sec)
19948 struct _arm_elf_section_data *arm_data;
19949 arm_data = get_arm_elf_section_data (sec);
19951 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
19954 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
19955 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
19956 FALSE otherwise. ISECTION is the best guess matching section from the
19957 input bfd IBFD, but it might be NULL. */
19960 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
19961 bfd *obfd ATTRIBUTE_UNUSED,
19962 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
19963 Elf_Internal_Shdr *osection)
19965 switch (osection->sh_type)
19967 case SHT_ARM_EXIDX:
19969 Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
19970 Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
19973 osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
19974 osection->sh_info = 0;
19976 /* The sh_link field must be set to the text section associated with
19977 this index section. Unfortunately the ARM EHABI does not specify
19978 exactly how to determine this association. Our caller does try
19979 to match up OSECTION with its corresponding input section however
19980 so that is a good first guess. */
19981 if (isection != NULL
19982 && osection->bfd_section != NULL
19983 && isection->bfd_section != NULL
19984 && isection->bfd_section->output_section != NULL
19985 && isection->bfd_section->output_section == osection->bfd_section
19986 && iheaders != NULL
19987 && isection->sh_link > 0
19988 && isection->sh_link < elf_numsections (ibfd)
19989 && iheaders[isection->sh_link]->bfd_section != NULL
19990 && iheaders[isection->sh_link]->bfd_section->output_section != NULL
19993 for (i = elf_numsections (obfd); i-- > 0;)
19994 if (oheaders[i]->bfd_section
19995 == iheaders[isection->sh_link]->bfd_section->output_section)
20001 /* Failing that we have to find a matching section ourselves. If
20002 we had the output section name available we could compare that
20003 with input section names. Unfortunately we don't. So instead
20004 we use a simple heuristic and look for the nearest executable
20005 section before this one. */
20006 for (i = elf_numsections (obfd); i-- > 0;)
20007 if (oheaders[i] == osection)
20013 if (oheaders[i]->sh_type == SHT_PROGBITS
20014 && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20015 == (SHF_ALLOC | SHF_EXECINSTR))
20021 osection->sh_link = i;
20022 /* If the text section was part of a group
20023 then the index section should be too. */
20024 if (oheaders[i]->sh_flags & SHF_GROUP)
20025 osection->sh_flags |= SHF_GROUP;
20031 case SHT_ARM_PREEMPTMAP:
20032 osection->sh_flags = SHF_ALLOC;
20035 case SHT_ARM_ATTRIBUTES:
20036 case SHT_ARM_DEBUGOVERLAY:
20037 case SHT_ARM_OVERLAYSECTION:
20045 /* Returns TRUE if NAME is an ARM mapping symbol.
20046 Traditionally the symbols $a, $d and $t have been used.
20047 The ARM ELF standard also defines $x (for A64 code). It also allows a
20048 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20049 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20050 not support them here. $t.x indicates the start of ThumbEE instructions. */
20053 is_arm_mapping_symbol (const char * name)
20055 return name != NULL /* Paranoia. */
20056 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20057 the mapping symbols could have acquired a prefix.
20058 We do not support this here, since such symbols no
20059 longer conform to the ARM ELF ABI. */
20060 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20061 && (name[2] == 0 || name[2] == '.');
20062 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20063 any characters that follow the period are legal characters for the body
20064 of a symbol's name. For now we just assume that this is the case. */
20067 /* Make sure that mapping symbols in object files are not removed via the
20068 "strip --strip-unneeded" tool. These symbols are needed in order to
20069 correctly generate interworking veneers, and for byte swapping code
20070 regions. Once an object file has been linked, it is safe to remove the
20071 symbols as they will no longer be needed. */
20074 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20076 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20077 && sym->section != bfd_abs_section_ptr
20078 && is_arm_mapping_symbol (sym->name))
20079 sym->flags |= BSF_KEEP;
20082 #undef elf_backend_copy_special_section_fields
20083 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20085 #define ELF_ARCH bfd_arch_arm
20086 #define ELF_TARGET_ID ARM_ELF_DATA
20087 #define ELF_MACHINE_CODE EM_ARM
20088 #ifdef __QNXTARGET__
20089 #define ELF_MAXPAGESIZE 0x1000
20091 #define ELF_MAXPAGESIZE 0x10000
20093 #define ELF_MINPAGESIZE 0x1000
20094 #define ELF_COMMONPAGESIZE 0x1000
20096 #define bfd_elf32_mkobject elf32_arm_mkobject
20098 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20099 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20100 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20101 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20102 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20103 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20104 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20105 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20106 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20107 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20108 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20109 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20111 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20112 #define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
20113 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20114 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20115 #define elf_backend_check_relocs elf32_arm_check_relocs
20116 #define elf_backend_update_relocs elf32_arm_update_relocs
20117 #define elf_backend_relocate_section elf32_arm_relocate_section
20118 #define elf_backend_write_section elf32_arm_write_section
20119 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20120 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20121 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20122 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20123 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20124 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20125 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20126 #define elf_backend_init_file_header elf32_arm_init_file_header
20127 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20128 #define elf_backend_object_p elf32_arm_object_p
20129 #define elf_backend_fake_sections elf32_arm_fake_sections
20130 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20131 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20132 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20133 #define elf_backend_size_info elf32_arm_size_info
20134 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20135 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20136 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20137 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20138 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20139 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20140 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20141 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20143 #define elf_backend_can_refcount 1
20144 #define elf_backend_can_gc_sections 1
20145 #define elf_backend_plt_readonly 1
20146 #define elf_backend_want_got_plt 1
20147 #define elf_backend_want_plt_sym 0
20148 #define elf_backend_want_dynrelro 1
20149 #define elf_backend_may_use_rel_p 1
20150 #define elf_backend_may_use_rela_p 0
20151 #define elf_backend_default_use_rela_p 0
20152 #define elf_backend_dtrel_excludes_plt 1
20154 #define elf_backend_got_header_size 12
20155 #define elf_backend_extern_protected_data 1
20157 #undef elf_backend_obj_attrs_vendor
20158 #define elf_backend_obj_attrs_vendor "aeabi"
20159 #undef elf_backend_obj_attrs_section
20160 #define elf_backend_obj_attrs_section ".ARM.attributes"
20161 #undef elf_backend_obj_attrs_arg_type
20162 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20163 #undef elf_backend_obj_attrs_section_type
20164 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20165 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20166 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20168 #undef elf_backend_section_flags
20169 #define elf_backend_section_flags elf32_arm_section_flags
20170 #undef elf_backend_lookup_section_flags_hook
20171 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20173 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20175 #include "elf32-target.h"
20177 /* Native Client targets. */
20179 #undef TARGET_LITTLE_SYM
20180 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20181 #undef TARGET_LITTLE_NAME
20182 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20183 #undef TARGET_BIG_SYM
20184 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20185 #undef TARGET_BIG_NAME
20186 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20188 /* Like elf32_arm_link_hash_table_create -- but overrides
20189 appropriately for NaCl. */
20191 static struct bfd_link_hash_table *
20192 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20194 struct bfd_link_hash_table *ret;
20196 ret = elf32_arm_link_hash_table_create (abfd);
20199 struct elf32_arm_link_hash_table *htab
20200 = (struct elf32_arm_link_hash_table *) ret;
20202 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20203 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20208 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20209 really need to use elf32_arm_modify_segment_map. But we do it
20210 anyway just to reduce gratuitous differences with the stock ARM backend. */
20213 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20215 return (elf32_arm_modify_segment_map (abfd, info)
20216 && nacl_modify_segment_map (abfd, info));
20220 elf32_arm_nacl_final_write_processing (bfd *abfd)
20222 arm_final_write_processing (abfd);
20223 return nacl_final_write_processing (abfd);
20227 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20228 const arelent *rel ATTRIBUTE_UNUSED)
20231 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20232 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20236 #define elf32_bed elf32_arm_nacl_bed
20237 #undef bfd_elf32_bfd_link_hash_table_create
20238 #define bfd_elf32_bfd_link_hash_table_create \
20239 elf32_arm_nacl_link_hash_table_create
20240 #undef elf_backend_plt_alignment
20241 #define elf_backend_plt_alignment 4
20242 #undef elf_backend_modify_segment_map
20243 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20244 #undef elf_backend_modify_headers
20245 #define elf_backend_modify_headers nacl_modify_headers
20246 #undef elf_backend_final_write_processing
20247 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20248 #undef bfd_elf32_get_synthetic_symtab
20249 #undef elf_backend_plt_sym_val
20250 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20251 #undef elf_backend_copy_special_section_fields
20253 #undef ELF_MINPAGESIZE
20254 #undef ELF_COMMONPAGESIZE
20256 #undef ELF_TARGET_OS
20257 #define ELF_TARGET_OS is_nacl
20259 #include "elf32-target.h"
/* Reset the backend macros overridden for NaCl back to the generic
   ARM defaults, so later target instantiations start clean. */
20262 #undef elf_backend_plt_alignment
20263 #undef elf_backend_modify_segment_map
20264 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20265 #undef elf_backend_modify_headers
20266 #undef elf_backend_final_write_processing
20267 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20268 #undef ELF_MINPAGESIZE
20269 #define ELF_MINPAGESIZE 0x1000
20270 #undef ELF_COMMONPAGESIZE
20271 #define ELF_COMMONPAGESIZE 0x1000
/* FDPIC Targets: target vector names and OSABI for the
   function-descriptor PIC ARM ELF variants. */
20276 #undef TARGET_LITTLE_SYM
20277 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20278 #undef TARGET_LITTLE_NAME
20279 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20280 #undef TARGET_BIG_SYM
20281 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20282 #undef TARGET_BIG_NAME
20283 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
/* Lower match priority so the plain ARM targets are preferred when
   both could recognise an object.  */
20284 #undef elf_match_priority
20285 #define elf_match_priority 128
20287 #define ELF_OSABI ELFOSABI_ARM_FDPIC
/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for FDPIC.  Creates the generic ARM hash table, then
   adjusts its fields for FDPIC linking.  */
20292 static struct bfd_link_hash_table *
20293 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20295 struct bfd_link_hash_table *ret;
20297 ret = elf32_arm_link_hash_table_create (abfd);
/* Downcast to the ARM-specific table to set FDPIC-specific fields.  */
20300 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
/* We need dynamic symbols for every section, since segments can
   relocate independently.  Decide, per section type, whether the
   section's dynamic symbol may be omitted.  */
20310 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20311 struct bfd_link_info *info
20313 asection *p ATTRIBUTE_UNUSED)
20315 switch (elf_section_data (p)->this_hdr.sh_type)
20319 /* If sh_type is yet undecided, assume it could be
20320 SHT_PROGBITS/SHT_NOBITS.  */
20324 /* There shouldn't be section relative relocations
20325 against any other section.  */
/* FDPIC backend: override the hash-table constructor and the
   section-dynsym policy, then instantiate the target vectors.  */
20332 #define elf32_bed elf32_arm_fdpic_bed
20334 #undef bfd_elf32_bfd_link_hash_table_create
20335 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20337 #undef elf_backend_omit_section_dynsym
20338 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20340 #undef ELF_TARGET_OS
20342 #include "elf32-target.h"
/* Restore the defaults overridden above for the next target.  */
20344 #undef elf_match_priority
20346 #undef elf_backend_omit_section_dynsym
/* VxWorks Targets: target vector names for the VxWorks ARM ELF
   variants.  */
20350 #undef TARGET_LITTLE_SYM
20351 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20352 #undef TARGET_LITTLE_NAME
20353 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20354 #undef TARGET_BIG_SYM
20355 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20356 #undef TARGET_BIG_NAME
20357 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for VxWorks.  Creates the generic ARM hash table,
   then adjusts its fields for VxWorks linking.  */
20362 static struct bfd_link_hash_table *
20363 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20365 struct bfd_link_hash_table *ret;
20367 ret = elf32_arm_link_hash_table_create (abfd);
/* Downcast to the ARM-specific table to set VxWorks-specific fields.  */
20370 struct elf32_arm_link_hash_table *htab
20371 = (struct elf32_arm_link_hash_table *) ret;
/* VxWorks final-write hook: apply the generic ARM final-write
   processing, then return the result of the VxWorks-specific pass.  */
20378 elf32_arm_vxworks_final_write_processing (bfd *abfd)
20380 arm_final_write_processing (abfd);
20381 return elf_vxworks_final_write_processing (abfd);
/* VxWorks backend overrides, then instantiate the target vectors.  */
20385 #define elf32_bed elf32_arm_vxworks_bed
20387 #undef bfd_elf32_bfd_link_hash_table_create
20388 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20389 #undef elf_backend_final_write_processing
20390 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20391 #undef elf_backend_emit_relocs
20392 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
/* VxWorks uses RELA relocations, unlike the generic ARM backend.  */
20394 #undef elf_backend_may_use_rel_p
20395 #define elf_backend_may_use_rel_p 0
20396 #undef elf_backend_may_use_rela_p
20397 #define elf_backend_may_use_rela_p 1
20398 #undef elf_backend_default_use_rela_p
20399 #define elf_backend_default_use_rela_p 1
20400 #undef elf_backend_want_plt_sym
20401 #define elf_backend_want_plt_sym 1
20402 #undef ELF_MAXPAGESIZE
20403 #define ELF_MAXPAGESIZE 0x1000
20404 #undef ELF_TARGET_OS
20405 #define ELF_TARGET_OS is_vxworks
20407 #include "elf32-target.h"
20410 /* Merge backend specific data from an object file to the output
20411 object file when linking.  Verifies endianness, merges EABI
   attributes and e_flags, and diagnoses ABI mismatches (APCS variant,
   float-passing convention, FP unit, interworking).  Returns TRUE if
   the input is compatible with the output.  */
20414 elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
20416 bfd *obfd = info->output_bfd;
20417 flagword out_flags;
20419 bfd_boolean flags_compatible = TRUE;
20422 /* Check if we have the same endianness.  */
20423 if (! _bfd_generic_verify_endian_match (ibfd, info))
/* Both input and output must be ARM ELF objects.  */
20426 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
/* Merge the build-attribute (EABI) sections first.  */
20429 if (!elf32_arm_merge_eabi_attributes (ibfd, info))
20432 /* The input BFD must have had its flags initialised.  */
20433 /* The following seems bogus to me -- The flags are initialized in
20434 the assembler but I don't think an elf_flags_init field is
20435 written into the object.  */
20436 /* BFD_ASSERT (elf_flags_init (ibfd)); */
20438 in_flags = elf_elfheader (ibfd)->e_flags;
20439 out_flags = elf_elfheader (obfd)->e_flags;
20441 /* In theory there is no reason why we couldn't handle this.  However
20442 in practice it isn't even close to working and there is no real
20443 reason to want it.  Reject non-dynamic EABIv4+ inputs already
   converted to BE8 byte order.  */
20444 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
20445 && !(ibfd->flags & DYNAMIC)
20446 && (in_flags & EF_ARM_BE8))
20448 _bfd_error_handler (_("error: %pB is already in final BE8 format"),
/* First merge into this output: adopt the input's flags wholesale.  */
20453 if (!elf_flags_init (obfd))
20455 /* If the input is the default architecture and had the default
20456 flags then do not bother setting the flags for the output
20457 architecture, instead allow future merges to do this.  If no
20458 future merges ever set these flags then they will retain their
20459 uninitialised values, which surprise surprise, correspond
20460 to the default values.  */
20461 if (bfd_get_arch_info (ibfd)->the_default
20462 && elf_elfheader (ibfd)->e_flags == 0)
20465 elf_flags_init (obfd) = TRUE;
20466 elf_elfheader (obfd)->e_flags = in_flags;
20468 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
20469 && bfd_get_arch_info (obfd)->the_default)
20470 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
20475 /* Determine what should happen if the input ARM architecture
20476 does not match the output ARM architecture.  */
20477 if (! bfd_arm_merge_machines (ibfd, obfd))
20480 /* Identical flags must be compatible.  */
20481 if (in_flags == out_flags)
20484 /* Check to see if the input BFD actually contains any sections.  If
20485 not, its flags may not have been initialised either, but it
20486 cannot actually cause any incompatibility.  Do not short-circuit
20487 dynamic objects; their section list may be emptied by
20488 elf_link_add_object_symbols.
20490 Also check to see if there are no code sections in the input.
20491 In this case there is no need to check for code specific flags.
20492 XXX - do we need to worry about floating-point format compatibility
20493 in data sections ?  */
20494 if (!(ibfd->flags & DYNAMIC))
20496 bfd_boolean null_input_bfd = TRUE;
20497 bfd_boolean only_data_sections = TRUE;
20499 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
20501 /* Ignore synthetic glue sections.  */
20502 if (strcmp (sec->name, ".glue_7")
20503 && strcmp (sec->name, ".glue_7t"))
/* A loadable section with contents and code means the input
   carries code whose flags must be checked.  */
20505 if ((bfd_section_flags (sec)
20506 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20507 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20508 only_data_sections = FALSE;
20510 null_input_bfd = FALSE;
20515 if (null_input_bfd || only_data_sections)
20519 /* Complain about various flag mismatches.  */
20520 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
20521 EF_ARM_EABI_VERSION (out_flags)))
20524 (_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20525 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
20526 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
20530 /* Not sure what needs to be checked for EABI versions >= 1.  */
20531 /* VxWorks libraries do not use these flags.  */
20532 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
20533 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
20534 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
/* APCS-26 vs APCS-32 mismatch is a hard error.  */
20536 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
20539 (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20540 ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
20541 obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
20542 flags_compatible = FALSE;
/* Float-argument-passing convention must match.  */
20545 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
20547 if (in_flags & EF_ARM_APCS_FLOAT)
20549 (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20553 (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20556 flags_compatible = FALSE;
/* VFP vs FPA floating-point instruction sets must match.  */
20559 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
20561 if (in_flags & EF_ARM_VFP_FLOAT)
20563 (_("error: %pB uses %s instructions, whereas %pB does not"),
20564 ibfd, "VFP", obfd);
20567 (_("error: %pB uses %s instructions, whereas %pB does not"),
20568 ibfd, "FPA", obfd);
20570 flags_compatible = FALSE;
/* Maverick (Cirrus) FP usage must match.  */
20573 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
20575 if (in_flags & EF_ARM_MAVERICK_FLOAT)
20577 (_("error: %pB uses %s instructions, whereas %pB does not"),
20578 ibfd, "Maverick", obfd);
20581 (_("error: %pB does not use %s instructions, whereas %pB does"),
20582 ibfd, "Maverick", obfd);
20584 flags_compatible = FALSE;
20587 #ifdef EF_ARM_SOFT_FLOAT
20588 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
20590 /* We can allow interworking between code that is VFP format
20591 layout, and uses either soft float or integer regs for
20592 passing floating point arguments and results.  We already
20593 know that the APCS_FLOAT flags match; similarly for VFP
20595 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
20596 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
20598 if (in_flags & EF_ARM_SOFT_FLOAT)
20600 (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20604 (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20607 flags_compatible = FALSE;
20612 /* Interworking mismatch is only a warning.  */
20613 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
20615 if (in_flags & EF_ARM_INTERWORK)
20618 (_("warning: %pB supports interworking, whereas %pB does not"),
20624 (_("warning: %pB does not support interworking, whereas %pB does"),
20630 return flags_compatible;