1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2014 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
30 #include "elf-vxworks.h"
33 /* Return the relocation section associated with NAME. HTAB is the
34 bfd's elf32_arm_link_hash_table. */
35 #define RELOC_SECTION(HTAB, NAME) \
36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
38 /* Return size of a relocation entry. HTAB is the bfd's
39 elf32_arm_link_hash_table. */
40 #define RELOC_SIZE(HTAB) \
42 ? sizeof (Elf32_External_Rel) \
43 : sizeof (Elf32_External_Rela))
45 /* Return function to swap relocations in. HTAB is the bfd's
46 elf32_arm_link_hash_table. */
47 #define SWAP_RELOC_IN(HTAB) \
49 ? bfd_elf32_swap_reloc_in \
50 : bfd_elf32_swap_reloca_in)
52 /* Return function to swap relocations out. HTAB is the bfd's
53 elf32_arm_link_hash_table. */
54 #define SWAP_RELOC_OUT(HTAB) \
56 ? bfd_elf32_swap_reloc_out \
57 : bfd_elf32_swap_reloca_out)
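/* A minimal usage sketch of the REL/RELA helpers above, assuming only
   that the consumer carries a use_rel flag.  The real consumer,
   struct elf32_arm_link_hash_table, is defined further down this file;
   the example_* names below are illustrative only.  */

struct example_rel_htab
{
  int use_rel;			/* Non-zero for REL targets, zero for RELA.  */
};

static ATTRIBUTE_UNUSED const char *
example_dynamic_reloc_section_name (const struct example_rel_htab *htab)
{
  /* Yields ".rel.dyn" when use_rel is set and ".rela.dyn" otherwise,
     because ".rel"/".rela" concatenate with the NAME string literal.  */
  return RELOC_SECTION (htab, ".dyn");
}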
59 #define elf_info_to_howto 0
60 #define elf_info_to_howto_rel elf32_arm_info_to_howto
62 #define ARM_ELF_ABI_VERSION 0
63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
65 /* The Adjusted Place, as defined by AAELF. */
66 #define Pa(X) ((X) & 0xfffffffc)
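/* A minimal sketch of what Pa() does: it clears the two low bits of an
   address, rounding it down to a word boundary.  That is the operation
   AAELF prescribes for the place of 32-bit Thumb branch relocations,
   whose raw place may still carry the Thumb bit.  The example_ name is
   illustrative only.  */

static ATTRIBUTE_UNUSED bfd_vma
example_adjusted_place (bfd_vma place)
{
  /* A place of 0x8003 yields 0x8000.  */
  return Pa (place);
}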
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 struct bfd_link_info *link_info,
73 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
77 static reloc_howto_type elf32_arm_howto_table_1[] =
80 HOWTO (R_ARM_NONE, /* type */
82 0, /* size (0 = byte, 1 = short, 2 = long) */
84 FALSE, /* pc_relative */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
92 FALSE), /* pcrel_offset */
94 HOWTO (R_ARM_PC24, /* type */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
98 TRUE, /* pc_relative */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
108 /* 32-bit absolute. */
109 HOWTO (R_ARM_ABS32, /* type */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
113 FALSE, /* pc_relative */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
123 /* Standard 32-bit PC-relative reloc. */
124 HOWTO (R_ARM_REL32, /* type */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
128 TRUE, /* pc_relative */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
138 /* 8-bit absolute - R_ARM_LDR_PC_G0 in AAELF. */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
143 TRUE, /* pc_relative */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
153 /* 16-bit absolute. */
154 HOWTO (R_ARM_ABS16, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 FALSE, /* pc_relative */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
168 /* 12-bit absolute. */
169 HOWTO (R_ARM_ABS12, /* type */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
173 FALSE, /* pc_relative */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
183 HOWTO (R_ARM_THM_ABS5, /* type */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
187 FALSE, /* pc_relative */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
198 HOWTO (R_ARM_ABS8, /* type */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
202 FALSE, /* pc_relative */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
212 HOWTO (R_ARM_SBREL32, /* type */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
216 FALSE, /* pc_relative */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
226 HOWTO (R_ARM_THM_CALL, /* type */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE, /* pc_relative */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
240 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 TRUE, /* pc_relative */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
254 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE, /* pc_relative */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
268 HOWTO (R_ARM_TLS_DESC, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE, /* pc_relative */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
282 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE, /* pc_relative */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
301 TRUE, /* pc_relative */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
316 TRUE, /* pc_relative */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
326 /* Dynamic TLS relocations. */
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE, /* pc_relative */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE, /* pc_relative */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE, /* pc_relative */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
370 /* Relocs used in ARM Linux. */
372 HOWTO (R_ARM_COPY, /* type */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
376 FALSE, /* pc_relative */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
386 HOWTO (R_ARM_GLOB_DAT, /* type */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
390 FALSE, /* pc_relative */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
404 FALSE, /* pc_relative */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
414 HOWTO (R_ARM_RELATIVE, /* type */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
418 FALSE, /* pc_relative */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
428 HOWTO (R_ARM_GOTOFF32, /* type */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
432 FALSE, /* pc_relative */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
442 HOWTO (R_ARM_GOTPC, /* type */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
446 TRUE, /* pc_relative */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
456 HOWTO (R_ARM_GOT32, /* type */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE, /* pc_relative */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
470 HOWTO (R_ARM_PLT32, /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 TRUE, /* pc_relative */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
484 HOWTO (R_ARM_CALL, /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 TRUE, /* pc_relative */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
498 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
502 TRUE, /* pc_relative */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
512 HOWTO (R_ARM_THM_JUMP24, /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 TRUE, /* pc_relative */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
526 HOWTO (R_ARM_BASE_ABS, /* type */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
530 FALSE, /* pc_relative */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
544 TRUE, /* pc_relative */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
558 TRUE, /* pc_relative */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
572 TRUE, /* pc_relative */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 FALSE, /* pc_relative */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 FALSE, /* pc_relative */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
614 FALSE, /* pc_relative */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
624 HOWTO (R_ARM_TARGET1, /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 FALSE, /* pc_relative */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
638 HOWTO (R_ARM_ROSEGREL32, /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE, /* pc_relative */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
652 HOWTO (R_ARM_V4BX, /* type */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
656 FALSE, /* pc_relative */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
666 HOWTO (R_ARM_TARGET2, /* type */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
670 FALSE, /* pc_relative */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
680 HOWTO (R_ARM_PREL31, /* type */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
684 TRUE, /* pc_relative */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
698 FALSE, /* pc_relative */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
708 HOWTO (R_ARM_MOVT_ABS, /* type */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
712 FALSE, /* pc_relative */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
726 TRUE, /* pc_relative */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
736 HOWTO (R_ARM_MOVT_PREL, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 TRUE, /* pc_relative */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
754 FALSE, /* pc_relative */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE, /* pc_relative */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 TRUE, /* pc_relative */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
796 TRUE, /* pc_relative */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
806 HOWTO (R_ARM_THM_JUMP19, /* type */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
810 TRUE, /* pc_relative */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
820 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
824 TRUE, /* pc_relative */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 TRUE, /* pc_relative */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
851 HOWTO (R_ARM_THM_PC12, /* type */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
855 TRUE, /* pc_relative */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
865 HOWTO (R_ARM_ABS32_NOI, /* type */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
869 FALSE, /* pc_relative */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
879 HOWTO (R_ARM_REL32_NOI, /* type */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
883 TRUE, /* pc_relative */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
893 /* Group relocations. */
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
899 TRUE, /* pc_relative */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
913 TRUE, /* pc_relative */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
927 TRUE, /* pc_relative */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
941 TRUE, /* pc_relative */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
955 TRUE, /* pc_relative */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
969 TRUE, /* pc_relative */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
983 TRUE, /* pc_relative */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
997 TRUE, /* pc_relative */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 TRUE, /* pc_relative */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 TRUE, /* pc_relative */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 TRUE, /* pc_relative */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 TRUE, /* pc_relative */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE, /* pc_relative */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE, /* pc_relative */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 TRUE, /* pc_relative */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 TRUE, /* pc_relative */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 TRUE, /* pc_relative */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 TRUE, /* pc_relative */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 TRUE, /* pc_relative */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 TRUE, /* pc_relative */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 TRUE, /* pc_relative */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 TRUE, /* pc_relative */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE, /* pc_relative */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE, /* pc_relative */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 TRUE, /* pc_relative */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 TRUE, /* pc_relative */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 TRUE, /* pc_relative */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1273 /* End of group relocations. */
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 FALSE, /* pc_relative */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 FALSE, /* pc_relative */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 FALSE, /* pc_relative */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 FALSE, /* pc_relative */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 FALSE, /* pc_relative */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 FALSE, /* pc_relative */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 FALSE, /* pc_relative */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 FALSE, /* pc_relative */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE, /* pc_relative */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 FALSE, /* pc_relative */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 FALSE, /* pc_relative */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 FALSE, /* pc_relative */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 TRUE, /* pc_relative */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 FALSE, /* pc_relative */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE, /* pc_relative */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 FALSE, /* pc_relative */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1500 FALSE), /* pcrel_offset */
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE, /* pc_relative */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1515 FALSE), /* pcrel_offset */
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 TRUE, /* pc_relative */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 TRUE, /* pc_relative */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 FALSE, /* pc_relative */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 FALSE, /* pc_relative */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 FALSE, /* pc_relative */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 FALSE, /* pc_relative */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 FALSE, /* pc_relative */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 bfd_elf_generic_reloc, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 FALSE, /* pc_relative */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 FALSE, /* pc_relative */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 FALSE, /* pc_relative */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1658 /* 112-127 private relocations. */
1676 /* R_ARM_ME_TOO, obsolete. */
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 FALSE, /* pc_relative */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1695 static reloc_howto_type elf32_arm_howto_table_2[1] =
1697 HOWTO (R_ARM_IRELATIVE, /* type */
1699 2, /* size (0 = byte, 1 = short, 2 = long) */
1701 FALSE, /* pc_relative */
1703 complain_overflow_bitfield,/* complain_on_overflow */
1704 bfd_elf_generic_reloc, /* special_function */
1705 "R_ARM_IRELATIVE", /* name */
1706 TRUE, /* partial_inplace */
1707 0xffffffff, /* src_mask */
1708 0xffffffff, /* dst_mask */
1709 FALSE) /* pcrel_offset */
1712 /* 249-255 extended, currently unused, relocations: */
1713 static reloc_howto_type elf32_arm_howto_table_3[4] =
1715 HOWTO (R_ARM_RREL32, /* type */
1717 0, /* size (0 = byte, 1 = short, 2 = long) */
1719 FALSE, /* pc_relative */
1721 complain_overflow_dont,/* complain_on_overflow */
1722 bfd_elf_generic_reloc, /* special_function */
1723 "R_ARM_RREL32", /* name */
1724 FALSE, /* partial_inplace */
1727 FALSE), /* pcrel_offset */
1729 HOWTO (R_ARM_RABS32, /* type */
1731 0, /* size (0 = byte, 1 = short, 2 = long) */
1733 FALSE, /* pc_relative */
1735 complain_overflow_dont,/* complain_on_overflow */
1736 bfd_elf_generic_reloc, /* special_function */
1737 "R_ARM_RABS32", /* name */
1738 FALSE, /* partial_inplace */
1741 FALSE), /* pcrel_offset */
1743 HOWTO (R_ARM_RPC24, /* type */
1745 0, /* size (0 = byte, 1 = short, 2 = long) */
1747 FALSE, /* pc_relative */
1749 complain_overflow_dont,/* complain_on_overflow */
1750 bfd_elf_generic_reloc, /* special_function */
1751 "R_ARM_RPC24", /* name */
1752 FALSE, /* partial_inplace */
1755 FALSE), /* pcrel_offset */
1757 HOWTO (R_ARM_RBASE, /* type */
1759 0, /* size (0 = byte, 1 = short, 2 = long) */
1761 FALSE, /* pc_relative */
1763 complain_overflow_dont,/* complain_on_overflow */
1764 bfd_elf_generic_reloc, /* special_function */
1765 "R_ARM_RBASE", /* name */
1766 FALSE, /* partial_inplace */
1769 FALSE) /* pcrel_offset */
1772 static reloc_howto_type *
1773 elf32_arm_howto_from_type (unsigned int r_type)
1775 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1776 return &elf32_arm_howto_table_1[r_type];
1778 if (r_type == R_ARM_IRELATIVE)
1779 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1781 if (r_type >= R_ARM_RREL32
1782 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1783 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1789 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1790 Elf_Internal_Rela * elf_reloc)
1792 unsigned int r_type;
1794 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1795 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
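/* A small sketch of the ELF32 r_info packing that the lookup above
   relies on: the symbol index lives in the upper 24 bits and the
   relocation type in the low 8 bits, so ELF32_R_TYPE (used above) and
   ELF32_R_SYM simply split the word that the standard ELF32_R_INFO
   macro builds.  The example_ name is illustrative only.  */

static ATTRIBUTE_UNUSED bfd_vma
example_pack_r_info (bfd_vma sym_index, unsigned int r_type)
{
  /* Equivalent to (sym_index << 8) | (r_type & 0xff).  */
  return ELF32_R_INFO (sym_index, r_type);
}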
1798 struct elf32_arm_reloc_map
1800 bfd_reloc_code_real_type bfd_reloc_val;
1801 unsigned char elf_reloc_val;
1804 /* All entries in this list must also be present in one of the elf32_arm_howto_table_* arrays above. */
1805 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1807 {BFD_RELOC_NONE, R_ARM_NONE},
1808 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1809 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1810 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1811 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1812 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1813 {BFD_RELOC_32, R_ARM_ABS32},
1814 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1815 {BFD_RELOC_8, R_ARM_ABS8},
1816 {BFD_RELOC_16, R_ARM_ABS16},
1817 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1818 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1819 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1821 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1822 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1823 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1824 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1825 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1826 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1827 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1828 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1829 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1830 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1831 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1832 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1833 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1834 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1835 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1836 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1837 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1838 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1839 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1840 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1841 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1842 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1843 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1844 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1845 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1846 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1847 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1848 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1849 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1850 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1851 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1852 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1853 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1854 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1855 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1856 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1857 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1858 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1859 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1860 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1861 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1862 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1863 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1864 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1865 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1866 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1867 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1868 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1869 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1870 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1871 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1872 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1873 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1874 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1875 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1876 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1877 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1878 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1879 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1880 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1881 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1882 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1883 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1884 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1885 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1886 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1887 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1888 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1889 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1890 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1891 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1892 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1895 static reloc_howto_type *
1896 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1897 bfd_reloc_code_real_type code)
1901 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1902 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1903 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
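/* A minimal usage sketch of the mapping above.  Generic code normally
   reaches it through the target vector (bfd_reloc_type_lookup), but a
   direct call shows the effect: a generic BFD reloc code is translated
   into the corresponding ARM howto entry, or NULL if the code is not
   present in elf32_arm_reloc_map.  The example_ name is illustrative
   only.  */

static ATTRIBUTE_UNUSED const char *
example_reloc_name_for_code (bfd *abfd, bfd_reloc_code_real_type code)
{
  reloc_howto_type *howto = elf32_arm_reloc_type_lookup (abfd, code);

  /* For instance BFD_RELOC_32 maps to R_ARM_ABS32, so the string
     "R_ARM_ABS32" would be returned here.  */
  return howto != NULL ? howto->name : NULL;
}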
1908 static reloc_howto_type *
1909 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1914 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1915 if (elf32_arm_howto_table_1[i].name != NULL
1916 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1917 return &elf32_arm_howto_table_1[i];
1919 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1920 if (elf32_arm_howto_table_2[i].name != NULL
1921 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1922 return &elf32_arm_howto_table_2[i];
1924 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1925 if (elf32_arm_howto_table_3[i].name != NULL
1926 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1927 return &elf32_arm_howto_table_3[i];
1932 /* Support for core dump NOTE sections. */
1935 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1940 switch (note->descsz)
1945 case 148: /* Linux/ARM 32-bit. */
1947 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
1950 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
1959 /* Make a ".reg/999" section. */
1960 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1961 size, note->descpos + offset);
1965 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1967 switch (note->descsz)
1972 case 124: /* Linux/ARM elf_prpsinfo. */
1973 elf_tdata (abfd)->core->pid
1974 = bfd_get_32 (abfd, note->descdata + 12);
1975 elf_tdata (abfd)->core->program
1976 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1977 elf_tdata (abfd)->core->command
1978 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1981 /* Note that for some reason, a spurious space is tacked
1982 onto the end of the args by some implementations (at least
1983 one, anyway), so strip it off if it exists. */
1985 char *command = elf_tdata (abfd)->core->command;
1986 int n = strlen (command);
1988 if (0 < n && command[n - 1] == ' ')
1989 command[n - 1] = '\0';
1996 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2009 va_start (ap, note_type);
2010 memset (data, 0, sizeof (data));
2011 strncpy (data + 28, va_arg (ap, const char *), 16);
2012 strncpy (data + 44, va_arg (ap, const char *), 80);
2015 return elfcore_write_note (abfd, buf, bufsiz,
2016 "CORE", note_type, data, sizeof (data));
2027 va_start (ap, note_type);
2028 memset (data, 0, sizeof (data));
2029 pid = va_arg (ap, long);
2030 bfd_put_32 (abfd, pid, data + 24);
2031 cursig = va_arg (ap, int);
2032 bfd_put_16 (abfd, cursig, data + 12);
2033 greg = va_arg (ap, const void *);
2034 memcpy (data + 72, greg, 72);
2037 return elfcore_write_note (abfd, buf, bufsiz,
2038 "CORE", note_type, data, sizeof (data));
2043 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2044 #define TARGET_LITTLE_NAME "elf32-littlearm"
2045 #define TARGET_BIG_SYM arm_elf32_be_vec
2046 #define TARGET_BIG_NAME "elf32-bigarm"
2048 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2049 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2050 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2052 typedef unsigned long int insn32;
2053 typedef unsigned short int insn16;
2055 /* In lieu of proper flags, assume all EABIv4 or later objects are
2057 #define INTERWORK_FLAG(abfd) \
2058 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2059 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2060 || ((abfd)->flags & BFD_LINKER_CREATED))
2062 /* The linker script knows the section names for placement.
2063 The entry_names are used to do simple name mangling on the stubs.
2064 Given a function name and its type, the stub can be found. The
2065 name can be changed. The only requirement is that %s be present. */
2066 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2067 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2069 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2070 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2072 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2073 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2075 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2076 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2078 #define STUB_ENTRY_NAME "__%s_veneer"
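/* Illustrative sketch only: each entry-name format above contains a
   single %s (or %x/%d) conversion that is filled in with the target
   symbol's name or number.  A hypothetical helper building a veneer
   name for FUNC_NAME might look like the following; the real name
   construction is done inline where the glue and stub entries are
   created.  */
#if 0
static char *
make_veneer_name_sketch (const char *func_name)
{
  /* The "%s" in the format is replaced by FUNC_NAME, so this
     allocation is always large enough.  */
  size_t len = strlen (STUB_ENTRY_NAME) + strlen (func_name) + 1;
  char *name = (char *) bfd_malloc (len);

  if (name != NULL)
    sprintf (name, STUB_ENTRY_NAME, func_name);
  return name;
}
#endif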
2080 /* The name of the dynamic interpreter. This is put in the .interp
2082 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2084 static const unsigned long tls_trampoline [] =
2086 0xe08e0000, /* add r0, lr, r0 */
2087 0xe5901004, /* ldr r1, [r0,#4] */
2088 0xe12fff11, /* bx r1 */
2091 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2093 0xe52d2004, /* push {r2} */
2094 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2095 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2096 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2097 0xe081100f, /* 2: add r1, pc */
2098 0xe12fff12, /* bx r2 */
2099 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2100 + dl_tlsdesc_lazy_resolver(GOT) */
2101 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2104 #ifdef FOUR_WORD_PLT
2106 /* The first entry in a procedure linkage table looks like
2107 this. It is set up so that any shared library function that is
2108 called before the relocation has been set up calls the dynamic
2110 static const bfd_vma elf32_arm_plt0_entry [] =
2112 0xe52de004, /* str lr, [sp, #-4]! */
2113 0xe59fe010, /* ldr lr, [pc, #16] */
2114 0xe08fe00e, /* add lr, pc, lr */
2115 0xe5bef008, /* ldr pc, [lr, #8]! */
2118 /* Subsequent entries in a procedure linkage table look like
2120 static const bfd_vma elf32_arm_plt_entry [] =
2122 0xe28fc600, /* add ip, pc, #NN */
2123 0xe28cca00, /* add ip, ip, #NN */
2124 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2125 0x00000000, /* unused */
2128 #else /* not FOUR_WORD_PLT */
2130 /* The first entry in a procedure linkage table looks like
2131 this. It is set up so that any shared library function that is
2132 called before the relocation has been set up calls the dynamic
2134 static const bfd_vma elf32_arm_plt0_entry [] =
2136 0xe52de004, /* str lr, [sp, #-4]! */
2137 0xe59fe004, /* ldr lr, [pc, #4] */
2138 0xe08fe00e, /* add lr, pc, lr */
2139 0xe5bef008, /* ldr pc, [lr, #8]! */
2140 0x00000000, /* &GOT[0] - . */
2143 /* By default subsequent entries in a procedure linkage table look like
2144 this. Offsets that don't fit into 28 bits will cause a link error. */
2145 static const bfd_vma elf32_arm_plt_entry_short [] =
2147 0xe28fc600, /* add ip, pc, #0xNN00000 */
2148 0xe28cca00, /* add ip, ip, #0xNN000 */
2149 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2152 /* When explicitly asked, we'll use this "long" entry format
2153 which can cope with arbitrary displacements. */
2154 static const bfd_vma elf32_arm_plt_entry_long [] =
2156 0xe28fc200, /* add ip, pc, #0xN0000000 */
2157 0xe28cc600, /* add ip, ip, #0xNN00000 */
2158 0xe28cca00, /* add ip, ip, #0xNN000 */
2159 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2162 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
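/* Illustrative sketch, not the routine the backend actually runs:
   when a short PLT entry is written out, the displacement from the
   entry to its GOT slot is split across the three instructions above;
   bits [27:20] go into the first add, bits [19:12] into the second,
   and the low 12 bits into the ldr's immediate.  The hypothetical
   helper below shows the idea (the real code also byte-swaps through
   a helper when producing BE8 images).  */
#if 0
static void
fill_short_plt_entry_sketch (bfd *output_bfd, bfd_byte *ptr,
			     bfd_vma got_displacement)
{
  bfd_put_32 (output_bfd,
	      elf32_arm_plt_entry_short[0]
	      | ((got_displacement & 0x0ff00000) >> 20), ptr + 0);
  bfd_put_32 (output_bfd,
	      elf32_arm_plt_entry_short[1]
	      | ((got_displacement & 0x000ff000) >> 12), ptr + 4);
  bfd_put_32 (output_bfd,
	      elf32_arm_plt_entry_short[2]
	      | (got_displacement & 0x00000fff), ptr + 8);
}
#endif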
2164 #endif /* not FOUR_WORD_PLT */
2166 /* The first entry in a procedure linkage table looks like this.
2167 It is set up so that any shared library function that is called before the
2168 relocation has been set up calls the dynamic linker first. */
2169 static const bfd_vma elf32_thumb2_plt0_entry [] =
2171 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2172 an instruction may be encoded as one or two array elements. */
2173 0xf8dfb500, /* push {lr} */
2174 0x44fee008, /* ldr.w lr, [pc, #8] */
2176 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2177 0x00000000, /* &GOT[0] - . */
2180 /* Subsequent entries in a procedure linkage table for a Thumb-only target
2182 static const bfd_vma elf32_thumb2_plt_entry [] =
2184 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2185 an instruction may be encoded as one or two array elements. */
2186 0x0c00f240, /* movw ip, #0xNNNN */
2187 0x0c00f2c0, /* movt ip, #0xNNNN */
2188 0xf8dc44fc, /* add ip, pc */
2189 0xbf00f000 /* ldr.w pc, [ip] */
2193 /* The format of the first entry in the procedure linkage table
2194 for a VxWorks executable. */
2195 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2197 0xe52dc008, /* str ip,[sp,#-8]! */
2198 0xe59fc000, /* ldr ip,[pc] */
2199 0xe59cf008, /* ldr pc,[ip,#8] */
2200 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2203 /* The format of subsequent entries in a VxWorks executable. */
2204 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2206 0xe59fc000, /* ldr ip,[pc] */
2207 0xe59cf000, /* ldr pc,[ip] */
2208 0x00000000, /* .long @got */
2209 0xe59fc000, /* ldr ip,[pc] */
2210 0xea000000, /* b _PLT */
2211 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2214 /* The format of entries in a VxWorks shared library. */
2215 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2217 0xe59fc000, /* ldr ip,[pc] */
2218 0xe79cf009, /* ldr pc,[ip,r9] */
2219 0x00000000, /* .long @got */
2220 0xe59fc000, /* ldr ip,[pc] */
2221 0xe599f008, /* ldr pc,[r9,#8] */
2222 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2225 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2226 #define PLT_THUMB_STUB_SIZE 4
2227 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2233 /* The entries in a PLT when using a DLL-based target with multiple
2235 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2237 0xe51ff004, /* ldr pc, [pc, #-4] */
2238 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2241 /* The first entry in a procedure linkage table looks like
2242 this. It is set up so that any shared library function that is
2243 called before the relocation has been set up calls the dynamic
2245 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2248 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2249 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2250 0xe08cc00f, /* add ip, ip, pc */
2251 0xe52dc008, /* str ip, [sp, #-8]! */
2252 /* Second bundle: */
2253 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2254 0xe59cc000, /* ldr ip, [ip] */
2255 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2256 0xe12fff1c, /* bx ip */
2258 0xe320f000, /* nop */
2259 0xe320f000, /* nop */
2260 0xe320f000, /* nop */
2262 0xe50dc004, /* str ip, [sp, #-4] */
2263 /* Fourth bundle: */
2264 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2265 0xe59cc000, /* ldr ip, [ip] */
2266 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2267 0xe12fff1c, /* bx ip */
2269 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
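/* Each elf32_arm_nacl_plt_entry below ends with a branch to a shared
   tail, which appears to start ARM_NACL_PLT_TAIL_OFFSET (11 words)
   into the initial entry above, i.e. at the "str ip, [sp, #-4]"
   instruction that closes the third bundle.  */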
2271 /* Subsequent entries in a procedure linkage table look like this. */
2272 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2274 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2275 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2276 0xe08cc00f, /* add ip, ip, pc */
2277 0xea000000, /* b .Lplt_tail */
2280 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2281 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2282 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2283 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2284 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2285 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
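/* Illustrative sketch of how the limits above are used; the real
   tests live in arm_type_of_stub further down.  A Thumb-2 BL/B.W can
   reach its destination directly only when the branch offset falls
   inside the THM2 window; otherwise a long-branch stub is needed.
   The helper name below is hypothetical.  */
#if 0
static bfd_boolean
thumb2_branch_in_range_sketch (bfd_vma location, bfd_vma destination)
{
  bfd_signed_vma branch_offset = (bfd_signed_vma) (destination - location);

  return (branch_offset <= THM2_MAX_FWD_BRANCH_OFFSET
	  && branch_offset >= THM2_MAX_BWD_BRANCH_OFFSET);
}
#endif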
2295 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2296 /* A bit of a hack: a Thumb conditional branch, in which the proper condition
2297 is inserted by arm_build_one_stub(). */
2298 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2299 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2300 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2301 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2302 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2303 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2308 enum stub_insn_type type;
2309 unsigned int r_type;
2313 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2314 to reach the stub if necessary. */
2315 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2317 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2318 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2321 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2323 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2325 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2326 ARM_INSN (0xe12fff1c), /* bx ip */
2327 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2330 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2331 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2333 THUMB16_INSN (0xb401), /* push {r0} */
2334 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2335 THUMB16_INSN (0x4684), /* mov ip, r0 */
2336 THUMB16_INSN (0xbc01), /* pop {r0} */
2337 THUMB16_INSN (0x4760), /* bx ip */
2338 THUMB16_INSN (0xbf00), /* nop */
2339 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2342 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2344 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2346 THUMB16_INSN (0x4778), /* bx pc */
2347 THUMB16_INSN (0x46c0), /* nop */
2348 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2349 ARM_INSN (0xe12fff1c), /* bx ip */
2350 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2353 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2355 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2357 THUMB16_INSN (0x4778), /* bx pc */
2358 THUMB16_INSN (0x46c0), /* nop */
2359 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2360 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2363 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2364 one, when the destination is close enough. */
2365 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2367 THUMB16_INSN (0x4778), /* bx pc */
2368 THUMB16_INSN (0x46c0), /* nop */
2369 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2372 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2373 blx to reach the stub if necessary. */
2374 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2376 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2377 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2378 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2381 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2382 blx to reach the stub if necessary. We cannot add into pc;
2383 it is not guaranteed to mode switch (different in ARMv6 and
2385 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2387 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2388 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2389 ARM_INSN (0xe12fff1c), /* bx ip */
2390 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2393 /* V4T ARM -> Thumb long branch stub, PIC. */
2394 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2396 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2397 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2398 ARM_INSN (0xe12fff1c), /* bx ip */
2399 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2402 /* V4T Thumb -> ARM long branch stub, PIC. */
2403 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2405 THUMB16_INSN (0x4778), /* bx pc */
2406 THUMB16_INSN (0x46c0), /* nop */
2407 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2408 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2409 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2412 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2414 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2416 THUMB16_INSN (0xb401), /* push {r0} */
2417 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2418 THUMB16_INSN (0x46fc), /* mov ip, pc */
2419 THUMB16_INSN (0x4484), /* add ip, r0 */
2420 THUMB16_INSN (0xbc01), /* pop {r0} */
2421 THUMB16_INSN (0x4760), /* bx ip */
2422 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2425 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2427 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2429 THUMB16_INSN (0x4778), /* bx pc */
2430 THUMB16_INSN (0x46c0), /* nop */
2431 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2432 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2433 ARM_INSN (0xe12fff1c), /* bx ip */
2434 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2437 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2438 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2439 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2441 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2442 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2443 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2446 /* V4T Thumb -> TLS trampoline. Lowest common denominator, which is a
2447 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2448 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2450 THUMB16_INSN (0x4778), /* bx pc */
2451 THUMB16_INSN (0x46c0), /* nop */
2452 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2453 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2454 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2457 /* NaCl ARM -> ARM long branch stub. */
2458 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2460 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2461 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2462 ARM_INSN (0xe12fff1c), /* bx ip */
2463 ARM_INSN (0xe320f000), /* nop */
2464 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2465 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2466 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2467 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2470 /* NaCl ARM -> ARM long branch stub, PIC. */
2471 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2473 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2474 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2475 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2476 ARM_INSN (0xe12fff1c), /* bx ip */
2477 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2478 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2479 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2480 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2484 /* Cortex-A8 erratum-workaround stubs. */
2486 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2487 can't use a conditional branch to reach this stub). */
2489 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2491 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2492 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2493 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2496 /* Stub used for b.w and bl.w instructions. */
2498 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2500 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2503 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2505 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2508 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2509 instruction (which switches to ARM mode) to point to this stub. Jump to the
2510 real destination using an ARM-mode branch. */
2512 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2514 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2517 /* For each section group there can be a specially created linker section
2518 to hold the stubs for that group. The name of the stub section is based
2519 upon the name of another section within that group with the suffix below
2522 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2523 create what appeared to be a linker stub section when it actually
2524 contained user code/data. For example, consider this fragment:
2526 const char * stubborn_problems[] = { "np" };
2528 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2531 .data.rel.local.stubborn_problems
2533 This then causes problems in elf32_arm_build_stubs() as it triggers:
2535 // Ignore non-stub sections.
2536 if (!strstr (stub_sec->name, STUB_SUFFIX))
2539 And so the section would be ignored instead of being processed. Hence
2540 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2542 #define STUB_SUFFIX ".__stub"
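/* A hand-written illustration of the failure mode described above:
   with the old ".stub" suffix, strstr() also matches compiler-created
   data sections, whereas ".__stub" cannot occur in such a name.  */
#if 0
  strstr (".data.rel.local.stubborn_problems", ".stub");      /* Matches.  */
  strstr (".data.rel.local.stubborn_problems", STUB_SUFFIX);  /* NULL.  */
  strstr (".text" STUB_SUFFIX, STUB_SUFFIX);                  /* Matches.  */
#endif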
2544 /* One entry per long/short branch stub defined above. */
2546 DEF_STUB(long_branch_any_any) \
2547 DEF_STUB(long_branch_v4t_arm_thumb) \
2548 DEF_STUB(long_branch_thumb_only) \
2549 DEF_STUB(long_branch_v4t_thumb_thumb) \
2550 DEF_STUB(long_branch_v4t_thumb_arm) \
2551 DEF_STUB(short_branch_v4t_thumb_arm) \
2552 DEF_STUB(long_branch_any_arm_pic) \
2553 DEF_STUB(long_branch_any_thumb_pic) \
2554 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2555 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2556 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2557 DEF_STUB(long_branch_thumb_only_pic) \
2558 DEF_STUB(long_branch_any_tls_pic) \
2559 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2560 DEF_STUB(long_branch_arm_nacl) \
2561 DEF_STUB(long_branch_arm_nacl_pic) \
2562 DEF_STUB(a8_veneer_b_cond) \
2563 DEF_STUB(a8_veneer_b) \
2564 DEF_STUB(a8_veneer_bl) \
2565 DEF_STUB(a8_veneer_blx)
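/* The list of DEF_STUB invocations above forms an x-macro: it is
   expanded twice with different definitions of DEF_STUB, once just
   below to build the elf32_arm_stub_type enumeration and once further
   down to build the stub_definitions template table, so the two
   cannot get out of step.  Hand-expanding the first entry gives,
   roughly:  */
#if 0
  /* Enumeration expansion:  */
  arm_stub_long_branch_any_any,
  /* Template-table expansion:  */
  { elf32_arm_stub_long_branch_any_any,
    ARRAY_SIZE (elf32_arm_stub_long_branch_any_any) },
#endif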
2567 #define DEF_STUB(x) arm_stub_##x,
2568 enum elf32_arm_stub_type
2572 /* Note the first a8_veneer type. */
2573 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2579 const insn_sequence* template_sequence;
2583 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2584 static const stub_def stub_definitions[] =
2590 struct elf32_arm_stub_hash_entry
2592 /* Base hash table entry structure. */
2593 struct bfd_hash_entry root;
2595 /* The stub section. */
2598 /* Offset within stub_sec of the beginning of this stub. */
2599 bfd_vma stub_offset;
2601 /* Given the symbol's value and its section we can determine its final
2602 value when building the stubs (so the stub knows where to jump). */
2603 bfd_vma target_value;
2604 asection *target_section;
2606 /* Offset to apply to relocation referencing target_value. */
2607 bfd_vma target_addend;
2609 /* The instruction which caused this stub to be generated (only valid for
2610 Cortex-A8 erratum workaround stubs at present). */
2611 unsigned long orig_insn;
2613 /* The stub type. */
2614 enum elf32_arm_stub_type stub_type;
2615 /* Its encoding size in bytes. */
2618 const insn_sequence *stub_template;
2619 /* The size of the template (number of entries). */
2620 int stub_template_size;
2622 /* The symbol table entry, if any, that this was derived from. */
2623 struct elf32_arm_link_hash_entry *h;
2625 /* Type of branch. */
2626 enum arm_st_branch_type branch_type;
2628 /* Where this stub is being called from, or, in the case of combined
2629 stub sections, the first input section in the group. */
2632 /* The name for the local symbol at the start of this stub. The
2633 stub name in the hash table has to be unique; this one does not, so
2634 it can be friendlier. */
2638 /* Used to build a map of a section. This is required for mixed-endian
2641 typedef struct elf32_elf_section_map
2646 elf32_arm_section_map;
2648 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2652 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2653 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2654 VFP11_ERRATUM_ARM_VENEER,
2655 VFP11_ERRATUM_THUMB_VENEER
2657 elf32_vfp11_erratum_type;
2659 typedef struct elf32_vfp11_erratum_list
2661 struct elf32_vfp11_erratum_list *next;
2667 struct elf32_vfp11_erratum_list *veneer;
2668 unsigned int vfp_insn;
2672 struct elf32_vfp11_erratum_list *branch;
2676 elf32_vfp11_erratum_type type;
2678 elf32_vfp11_erratum_list;
2683 INSERT_EXIDX_CANTUNWIND_AT_END
2685 arm_unwind_edit_type;
2687 /* A (sorted) list of edits to apply to an unwind table. */
2688 typedef struct arm_unwind_table_edit
2690 arm_unwind_edit_type type;
2691 /* Note: we sometimes want to insert an unwind entry corresponding to a
2692 section different from the one we're currently writing out, so record the
2693 (text) section this edit relates to here. */
2694 asection *linked_section;
2696 struct arm_unwind_table_edit *next;
2698 arm_unwind_table_edit;
2700 typedef struct _arm_elf_section_data
2702 /* Information about mapping symbols. */
2703 struct bfd_elf_section_data elf;
2704 unsigned int mapcount;
2705 unsigned int mapsize;
2706 elf32_arm_section_map *map;
2707 /* Information about CPU errata. */
2708 unsigned int erratumcount;
2709 elf32_vfp11_erratum_list *erratumlist;
2710 /* Information about unwind tables. */
2713 /* Unwind info attached to a text section. */
2716 asection *arm_exidx_sec;
2719 /* Unwind info attached to an .ARM.exidx section. */
2722 arm_unwind_table_edit *unwind_edit_list;
2723 arm_unwind_table_edit *unwind_edit_tail;
2727 _arm_elf_section_data;
2729 #define elf32_arm_section_data(sec) \
2730 ((_arm_elf_section_data *) elf_section_data (sec))
2732 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2733 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2734 so they may be created multiple times: whilst relaxing we use an array of
2735 these entries, which is easy to refresh, then create stubs for each potentially
2736 erratum-triggering instruction once we've settled on a solution. */
2738 struct a8_erratum_fix
2744 unsigned long orig_insn;
2746 enum elf32_arm_stub_type stub_type;
2747 enum arm_st_branch_type branch_type;
2750 /* A table of relocs applied to branches which might trigger Cortex-A8
2753 struct a8_erratum_reloc
2756 bfd_vma destination;
2757 struct elf32_arm_link_hash_entry *hash;
2758 const char *sym_name;
2759 unsigned int r_type;
2760 enum arm_st_branch_type branch_type;
2761 bfd_boolean non_a8_stub;
2764 /* The size of the thread control block. */
2767 /* ARM-specific information about a PLT entry, over and above the usual
2771 /* We reference count Thumb references to a PLT entry separately,
2772 so that we can emit the Thumb trampoline only if needed. */
2773 bfd_signed_vma thumb_refcount;
2775 /* Some references from Thumb code may be eliminated by BL->BLX
2776 conversion, so record them separately. */
2777 bfd_signed_vma maybe_thumb_refcount;
2779 /* How many of the recorded PLT accesses were from non-call relocations.
2780 This information is useful when deciding whether anything takes the
2781 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2782 non-call references to the function should resolve directly to the
2783 real runtime target. */
2784 unsigned int noncall_refcount;
2786 /* Since PLT entries have variable size if the Thumb prologue is
2787 used, we need to record the index into .got.plt instead of
2788 recomputing it from the PLT offset. */
2789 bfd_signed_vma got_offset;
2792 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2793 struct arm_local_iplt_info
2795 /* The information that is usually found in the generic ELF part of
2796 the hash table entry. */
2797 union gotplt_union root;
2799 /* The information that is usually found in the ARM-specific part of
2800 the hash table entry. */
2801 struct arm_plt_info arm;
2803 /* A list of all potential dynamic relocations against this symbol. */
2804 struct elf_dyn_relocs *dyn_relocs;
2807 struct elf_arm_obj_tdata
2809 struct elf_obj_tdata root;
2811 /* tls_type for each local got entry. */
2812 char *local_got_tls_type;
2814 /* GOTPLT entries for TLS descriptors. */
2815 bfd_vma *local_tlsdesc_gotent;
2817 /* Information for local symbols that need entries in .iplt. */
2818 struct arm_local_iplt_info **local_iplt;
2820 /* Zero to warn when linking objects with incompatible enum sizes. */
2821 int no_enum_size_warning;
2823 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2824 int no_wchar_size_warning;
2827 #define elf_arm_tdata(bfd) \
2828 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2830 #define elf32_arm_local_got_tls_type(bfd) \
2831 (elf_arm_tdata (bfd)->local_got_tls_type)
2833 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2834 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2836 #define elf32_arm_local_iplt(bfd) \
2837 (elf_arm_tdata (bfd)->local_iplt)
2839 #define is_arm_elf(bfd) \
2840 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2841 && elf_tdata (bfd) != NULL \
2842 && elf_object_id (bfd) == ARM_ELF_DATA)
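/* Illustrative usage sketch of the accessors above: backend routines
   typically guard against non-ARM input BFDs before touching the
   ARM-specific tdata, along these lines.  */
#if 0
  if (!is_arm_elf (abfd))
    return FALSE;
  iplt_info = elf32_arm_local_iplt (abfd);   /* Hypothetical local.  */
#endif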
2845 elf32_arm_mkobject (bfd *abfd)
2847 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2851 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2853 /* Arm ELF linker hash entry. */
2854 struct elf32_arm_link_hash_entry
2856 struct elf_link_hash_entry root;
2858 /* Track dynamic relocs copied for this symbol. */
2859 struct elf_dyn_relocs *dyn_relocs;
2861 /* ARM-specific PLT information. */
2862 struct arm_plt_info plt;
2864 #define GOT_UNKNOWN 0
2865 #define GOT_NORMAL 1
2866 #define GOT_TLS_GD 2
2867 #define GOT_TLS_IE 4
2868 #define GOT_TLS_GDESC 8
2869 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2870 unsigned int tls_type : 8;
2872 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2873 unsigned int is_iplt : 1;
2875 unsigned int unused : 23;
2877 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2878 starting at the end of the jump table. */
2879 bfd_vma tlsdesc_got;
2881 /* The symbol marking the real symbol location for exported thumb
2882 symbols with Arm stubs. */
2883 struct elf_link_hash_entry *export_glue;
2885 /* A pointer to the most recently used stub hash entry against this
2887 struct elf32_arm_stub_hash_entry *stub_cache;
2890 /* Traverse an arm ELF linker hash table. */
2891 #define elf32_arm_link_hash_traverse(table, func, info) \
2892 (elf_link_hash_traverse \
2894 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2897 /* Get the ARM elf linker hash table from a link_info structure. */
2898 #define elf32_arm_hash_table(info) \
2899 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2900 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2902 #define arm_stub_hash_lookup(table, string, create, copy) \
2903 ((struct elf32_arm_stub_hash_entry *) \
2904 bfd_hash_lookup ((table), (string), (create), (copy)))
2906 /* Array to keep track of which stub sections have been created, and
2907 information on stub grouping. */
2910 /* This is the section to which stubs in the group will be
2913 /* The stub section. */
2917 #define elf32_arm_compute_jump_table_size(htab) \
2918 ((htab)->next_tls_desc_index * 4)
2920 /* ARM ELF linker hash table. */
2921 struct elf32_arm_link_hash_table
2923 /* The main hash table. */
2924 struct elf_link_hash_table root;
2926 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2927 bfd_size_type thumb_glue_size;
2929 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2930 bfd_size_type arm_glue_size;
2932 /* The size in bytes of the section containing the ARMv4 BX veneers. */
2933 bfd_size_type bx_glue_size;
2935 /* Offsets of ARMv4 BX veneers. Bit 1 set if present, and Bit 0 set when
2936 the veneer has been populated. */
2937 bfd_vma bx_glue_offset[15];
2939 /* The size in bytes of the section containing glue for VFP11 erratum
2941 bfd_size_type vfp11_erratum_glue_size;
2943 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2944 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2945 elf32_arm_write_section(). */
2946 struct a8_erratum_fix *a8_erratum_fixes;
2947 unsigned int num_a8_erratum_fixes;
2949 /* An arbitrary input BFD chosen to hold the glue sections. */
2950 bfd * bfd_of_glue_owner;
2952 /* Nonzero to output a BE8 image. */
2955 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2956 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2959 /* The relocation to use for R_ARM_TARGET2 relocations. */
2962 /* 0 = Ignore R_ARM_V4BX.
2963 1 = Convert BX to MOV PC.
2964 2 = Generate v4 interworking stubs. */
2967 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2970 /* Whether we should fix the ARM1176 BLX immediate issue. */
2973 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2976 /* What sort of code sequences we should look for which may trigger the
2977 VFP11 denorm erratum. */
2978 bfd_arm_vfp11_fix vfp11_fix;
2980 /* Global counter for the number of fixes we have emitted. */
2981 int num_vfp11_fixes;
2983 /* Nonzero to force PIC branch veneers. */
2986 /* The number of bytes in the initial entry in the PLT. */
2987 bfd_size_type plt_header_size;
2989 /* The number of bytes in the subsequent PLT entries. */
2990 bfd_size_type plt_entry_size;
2992 /* True if the target system is VxWorks. */
2995 /* True if the target system is Symbian OS. */
2998 /* True if the target system is Native Client. */
3001 /* True if the target uses REL relocations. */
3004 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3005 bfd_vma next_tls_desc_index;
3007 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3008 bfd_vma num_tls_desc;
3010 /* Short-cuts to get to dynamic linker sections. */
3014 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3017 /* The offset into splt of the PLT entry for the TLS descriptor
3018 resolver. Special values are 0, if not necessary (or not found
3019 to be necessary yet), and -1 if needed but not determined
3021 bfd_vma dt_tlsdesc_plt;
3023 /* The offset into sgot of the GOT entry used by the PLT entry
3025 bfd_vma dt_tlsdesc_got;
3027 /* Offset in .plt section of tls_arm_trampoline. */
3028 bfd_vma tls_trampoline;
3030 /* Data for R_ARM_TLS_LDM32 relocations. */
3033 bfd_signed_vma refcount;
3037 /* Small local sym cache. */
3038 struct sym_cache sym_cache;
3040 /* For convenience in allocate_dynrelocs. */
3043 /* The amount of space used by the reserved portion of the sgotplt
3044 section, plus whatever space is used by the jump slots. */
3045 bfd_vma sgotplt_jump_table_size;
3047 /* The stub hash table. */
3048 struct bfd_hash_table stub_hash_table;
3050 /* Linker stub bfd. */
3053 /* Linker call-backs. */
3054 asection * (*add_stub_section) (const char *, asection *, unsigned int);
3055 void (*layout_sections_again) (void);
3057 /* Array to keep track of which stub sections have been created, and
3058 information on stub grouping. */
3059 struct map_stub *stub_group;
3061 /* Number of elements in stub_group. */
3064 /* Assorted information used by elf32_arm_size_stubs. */
3065 unsigned int bfd_count;
3067 asection **input_list;
3070 /* Create an entry in an ARM ELF linker hash table. */
3072 static struct bfd_hash_entry *
3073 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3074 struct bfd_hash_table * table,
3075 const char * string)
3077 struct elf32_arm_link_hash_entry * ret =
3078 (struct elf32_arm_link_hash_entry *) entry;
3080 /* Allocate the structure if it has not already been allocated by a
3083 ret = (struct elf32_arm_link_hash_entry *)
3084 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3086 return (struct bfd_hash_entry *) ret;
3088 /* Call the allocation method of the superclass. */
3089 ret = ((struct elf32_arm_link_hash_entry *)
3090 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3094 ret->dyn_relocs = NULL;
3095 ret->tls_type = GOT_UNKNOWN;
3096 ret->tlsdesc_got = (bfd_vma) -1;
3097 ret->plt.thumb_refcount = 0;
3098 ret->plt.maybe_thumb_refcount = 0;
3099 ret->plt.noncall_refcount = 0;
3100 ret->plt.got_offset = -1;
3101 ret->is_iplt = FALSE;
3102 ret->export_glue = NULL;
3104 ret->stub_cache = NULL;
3107 return (struct bfd_hash_entry *) ret;
3110 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3114 elf32_arm_allocate_local_sym_info (bfd *abfd)
3116 if (elf_local_got_refcounts (abfd) == NULL)
3118 bfd_size_type num_syms;
3122 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3123 size = num_syms * (sizeof (bfd_signed_vma)
3124 + sizeof (struct arm_local_iplt_info *)
3127 data = bfd_zalloc (abfd, size);
3131 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3132 data += num_syms * sizeof (bfd_signed_vma);
3134 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3135 data += num_syms * sizeof (struct arm_local_iplt_info *);
3137 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3138 data += num_syms * sizeof (bfd_vma);
3140 elf32_arm_local_got_tls_type (abfd) = data;
3145 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3146 to input bfd ABFD. Create the information if it doesn't already exist.
3147 Return null if an allocation fails. */
3149 static struct arm_local_iplt_info *
3150 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3152 struct arm_local_iplt_info **ptr;
3154 if (!elf32_arm_allocate_local_sym_info (abfd))
3157 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3158 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3160 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3164 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3165 in ABFD's symbol table. If the symbol is global, H points to its
3166 hash table entry, otherwise H is null.
3168 Return true if the symbol does have PLT information. When returning
3169 true, point *ROOT_PLT at the target-independent reference count/offset
3170 union and *ARM_PLT at the ARM-specific information. */
3173 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3174 unsigned long r_symndx, union gotplt_union **root_plt,
3175 struct arm_plt_info **arm_plt)
3177 struct arm_local_iplt_info *local_iplt;
3181 *root_plt = &h->root.plt;
3186 if (elf32_arm_local_iplt (abfd) == NULL)
3189 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3190 if (local_iplt == NULL)
3193 *root_plt = &local_iplt->root;
3194 *arm_plt = &local_iplt->arm;
3198 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3202 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3203 struct arm_plt_info *arm_plt)
3205 struct elf32_arm_link_hash_table *htab;
3207 htab = elf32_arm_hash_table (info);
3208 return (arm_plt->thumb_refcount != 0
3209 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3212 /* Return a pointer to the head of the dynamic reloc list that should
3213 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3214 ABFD's symbol table. Return null if an error occurs. */
3216 static struct elf_dyn_relocs **
3217 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3218 Elf_Internal_Sym *isym)
3220 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3222 struct arm_local_iplt_info *local_iplt;
3224 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3225 if (local_iplt == NULL)
3227 return &local_iplt->dyn_relocs;
3231 /* Track dynamic relocs needed for local syms too.
3232 We really need local syms available to do this
3237 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3241 vpp = &elf_section_data (s)->local_dynrel;
3242 return (struct elf_dyn_relocs **) vpp;
3246 /* Initialize an entry in the stub hash table. */
3248 static struct bfd_hash_entry *
3249 stub_hash_newfunc (struct bfd_hash_entry *entry,
3250 struct bfd_hash_table *table,
3253 /* Allocate the structure if it has not already been allocated by a
3257 entry = (struct bfd_hash_entry *)
3258 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3263 /* Call the allocation method of the superclass. */
3264 entry = bfd_hash_newfunc (entry, table, string);
3267 struct elf32_arm_stub_hash_entry *eh;
3269 /* Initialize the local fields. */
3270 eh = (struct elf32_arm_stub_hash_entry *) entry;
3271 eh->stub_sec = NULL;
3272 eh->stub_offset = 0;
3273 eh->target_value = 0;
3274 eh->target_section = NULL;
3275 eh->target_addend = 0;
3277 eh->stub_type = arm_stub_none;
3279 eh->stub_template = NULL;
3280 eh->stub_template_size = 0;
3283 eh->output_name = NULL;
3289 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3290 shortcuts to them in our hash table. */
3293 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3295 struct elf32_arm_link_hash_table *htab;
3297 htab = elf32_arm_hash_table (info);
3301 /* BPABI objects never have a GOT, or associated sections. */
3302 if (htab->symbian_p)
3305 if (! _bfd_elf_create_got_section (dynobj, info))
3311 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3314 create_ifunc_sections (struct bfd_link_info *info)
3316 struct elf32_arm_link_hash_table *htab;
3317 const struct elf_backend_data *bed;
3322 htab = elf32_arm_hash_table (info);
3323 dynobj = htab->root.dynobj;
3324 bed = get_elf_backend_data (dynobj);
3325 flags = bed->dynamic_sec_flags;
3327 if (htab->root.iplt == NULL)
3329 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3330 flags | SEC_READONLY | SEC_CODE);
3332 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3334 htab->root.iplt = s;
3337 if (htab->root.irelplt == NULL)
3339 s = bfd_make_section_anyway_with_flags (dynobj,
3340 RELOC_SECTION (htab, ".iplt"),
3341 flags | SEC_READONLY);
3343 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3345 htab->root.irelplt = s;
3348 if (htab->root.igotplt == NULL)
3350 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3352 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3354 htab->root.igotplt = s;
3359 /* Determine if we're dealing with a Thumb-only architecture. */
3362 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3364 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3368 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
3371 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3374 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3375 Tag_CPU_arch_profile);
3377 return profile == 'M';
3380 /* Determine if we're dealing with a Thumb-2 object. */
3383 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3385 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3387 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3390 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3391 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3395 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3397 struct elf32_arm_link_hash_table *htab;
3399 htab = elf32_arm_hash_table (info);
3403 if (!htab->root.sgot && !create_got_section (dynobj, info))
3406 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3409 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3411 htab->srelbss = bfd_get_linker_section (dynobj,
3412 RELOC_SECTION (htab, ".bss"));
3414 if (htab->vxworks_p)
3416 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3421 htab->plt_header_size = 0;
3422 htab->plt_entry_size
3423 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3427 htab->plt_header_size
3428 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3429 htab->plt_entry_size
3430 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3436 Test for Thumb-only architectures. Note - we cannot just call
3437 using_thumb_only() as the attributes in the output bfd have not been
3438 initialised at this point, so instead we use the input bfd. */
3439 bfd * saved_obfd = htab->obfd;
3441 htab->obfd = dynobj;
3442 if (using_thumb_only (htab))
3444 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3445 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3447 htab->obfd = saved_obfd;
3450 if (!htab->root.splt
3451 || !htab->root.srelplt
3453 || (!info->shared && !htab->srelbss))
3459 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3462 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3463 struct elf_link_hash_entry *dir,
3464 struct elf_link_hash_entry *ind)
3466 struct elf32_arm_link_hash_entry *edir, *eind;
3468 edir = (struct elf32_arm_link_hash_entry *) dir;
3469 eind = (struct elf32_arm_link_hash_entry *) ind;
3471 if (eind->dyn_relocs != NULL)
3473 if (edir->dyn_relocs != NULL)
3475 struct elf_dyn_relocs **pp;
3476 struct elf_dyn_relocs *p;
3478 /* Add reloc counts against the indirect sym to the direct sym
3479 list. Merge any entries against the same section. */
3480 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3482 struct elf_dyn_relocs *q;
3484 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3485 if (q->sec == p->sec)
3487 q->pc_count += p->pc_count;
3488 q->count += p->count;
3495 *pp = edir->dyn_relocs;
3498 edir->dyn_relocs = eind->dyn_relocs;
3499 eind->dyn_relocs = NULL;
3502 if (ind->root.type == bfd_link_hash_indirect)
3504 /* Copy over PLT info. */
3505 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3506 eind->plt.thumb_refcount = 0;
3507 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3508 eind->plt.maybe_thumb_refcount = 0;
3509 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3510 eind->plt.noncall_refcount = 0;
3512 /* We should only allocate a function to .iplt once the final
3513 symbol information is known. */
3514 BFD_ASSERT (!eind->is_iplt);
3516 if (dir->got.refcount <= 0)
3518 edir->tls_type = eind->tls_type;
3519 eind->tls_type = GOT_UNKNOWN;
3523 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3526 /* Destroy an ARM elf linker hash table. */
3529 elf32_arm_link_hash_table_free (bfd *obfd)
3531 struct elf32_arm_link_hash_table *ret
3532 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3534 bfd_hash_table_free (&ret->stub_hash_table);
3535 _bfd_elf_link_hash_table_free (obfd);
3538 /* Create an ARM elf linker hash table. */
3540 static struct bfd_link_hash_table *
3541 elf32_arm_link_hash_table_create (bfd *abfd)
3543 struct elf32_arm_link_hash_table *ret;
3544 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3546 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
3550 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3551 elf32_arm_link_hash_newfunc,
3552 sizeof (struct elf32_arm_link_hash_entry),
3559 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3560 #ifdef FOUR_WORD_PLT
3561 ret->plt_header_size = 16;
3562 ret->plt_entry_size = 16;
3564 ret->plt_header_size = 20;
3565 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
3570 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3571 sizeof (struct elf32_arm_stub_hash_entry)))
3573 _bfd_elf_link_hash_table_free (abfd);
3576 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
3578 return &ret->root.root;
3581 /* Determine what kind of NOPs are available. */
3584 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3586 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3588 return arch == TAG_CPU_ARCH_V6T2
3589 || arch == TAG_CPU_ARCH_V6K
3590 || arch == TAG_CPU_ARCH_V7
3591 || arch == TAG_CPU_ARCH_V7E_M;
3595 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3597 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3599 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3600 || arch == TAG_CPU_ARCH_V7E_M);
3604 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3608 case arm_stub_long_branch_thumb_only:
3609 case arm_stub_long_branch_v4t_thumb_arm:
3610 case arm_stub_short_branch_v4t_thumb_arm:
3611 case arm_stub_long_branch_v4t_thumb_arm_pic:
3612 case arm_stub_long_branch_v4t_thumb_tls_pic:
3613 case arm_stub_long_branch_thumb_only_pic:
3624 /* Determine the type of stub needed, if any, for a call. */
3626 static enum elf32_arm_stub_type
3627 arm_type_of_stub (struct bfd_link_info *info,
3628 asection *input_sec,
3629 const Elf_Internal_Rela *rel,
3630 unsigned char st_type,
3631 enum arm_st_branch_type *actual_branch_type,
3632 struct elf32_arm_link_hash_entry *hash,
3633 bfd_vma destination,
3639 bfd_signed_vma branch_offset;
3640 unsigned int r_type;
3641 struct elf32_arm_link_hash_table * globals;
3644 enum elf32_arm_stub_type stub_type = arm_stub_none;
3646 enum arm_st_branch_type branch_type = *actual_branch_type;
3647 union gotplt_union *root_plt;
3648 struct arm_plt_info *arm_plt;
3650 if (branch_type == ST_BRANCH_LONG)
3653 globals = elf32_arm_hash_table (info);
3654 if (globals == NULL)
3657 thumb_only = using_thumb_only (globals);
3659 thumb2 = using_thumb2 (globals);
3661 /* Determine where the call point is. */
3662 location = (input_sec->output_offset
3663 + input_sec->output_section->vma
3666 r_type = ELF32_R_TYPE (rel->r_info);
3668 /* ST_BRANCH_TO_ARM makes no sense for Thumb-only targets when we
3669 are considering a function call relocation. */
3670 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3671 && branch_type == ST_BRANCH_TO_ARM)
3672 branch_type = ST_BRANCH_TO_THUMB;
3674 /* For TLS call relocs, it is the caller's responsibility to provide
3675 the address of the appropriate trampoline. */
3676 if (r_type != R_ARM_TLS_CALL
3677 && r_type != R_ARM_THM_TLS_CALL
3678 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3679 &root_plt, &arm_plt)
3680 && root_plt->offset != (bfd_vma) -1)
3684 if (hash == NULL || hash->is_iplt)
3685 splt = globals->root.iplt;
3687 splt = globals->root.splt;
3692 /* Note when dealing with PLT entries: the main PLT stub is in
3693 ARM mode, so if the branch is in Thumb mode, another
3694 Thumb->ARM stub will be inserted later just before the ARM
3695 PLT stub. We don't take this extra distance into account
3696 here, because if a long branch stub is needed, we'll add a
3697 Thumb->Arm one and branch directly to the ARM PLT entry
3698 because it avoids spreading offset corrections in several
3701 destination = (splt->output_section->vma
3702 + splt->output_offset
3703 + root_plt->offset);
3705 branch_type = ST_BRANCH_TO_ARM;
3708 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3709 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3711 branch_offset = (bfd_signed_vma) (destination - location);
3713 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3714 || r_type == R_ARM_THM_TLS_CALL)
3716 /* Handle cases where:
3717 - this call goes too far (different Thumb/Thumb2 max
3719 - it's a Thumb->Arm call and blx is not available, or it's a
3720 Thumb->Arm branch (not bl). A stub is needed in this case,
3721 but only if this call is not through a PLT entry. Indeed,
3722 PLT stubs handle mode switching already.
3725 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3726 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3728 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3729 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3730 || (branch_type == ST_BRANCH_TO_ARM
3731 && (((r_type == R_ARM_THM_CALL
3732 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3733 || (r_type == R_ARM_THM_JUMP24))
3736 if (branch_type == ST_BRANCH_TO_THUMB)
3738 /* Thumb to thumb. */
3741 stub_type = (info->shared | globals->pic_veneer)
3743 ? ((globals->use_blx
3744 && (r_type == R_ARM_THM_CALL))
3745 /* V5T and above. Stub starts with ARM code, so
3746 we must be able to switch mode before
3747 reaching it, which is only possible for 'bl'
3748 (ie R_ARM_THM_CALL relocation). */
3749 ? arm_stub_long_branch_any_thumb_pic
3750 /* On V4T, use Thumb code only. */
3751 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3753 /* non-PIC stubs. */
3754 : ((globals->use_blx
3755 && (r_type == R_ARM_THM_CALL))
3756 /* V5T and above. */
3757 ? arm_stub_long_branch_any_any
3759 : arm_stub_long_branch_v4t_thumb_thumb);
3763 stub_type = (info->shared | globals->pic_veneer)
3765 ? arm_stub_long_branch_thumb_only_pic
3767 : arm_stub_long_branch_thumb_only;
3774 && sym_sec->owner != NULL
3775 && !INTERWORK_FLAG (sym_sec->owner))
3777 (*_bfd_error_handler)
3778 (_("%B(%s): warning: interworking not enabled.\n"
3779 " first occurrence: %B: Thumb call to ARM"),
3780 sym_sec->owner, input_bfd, name);
3784 (info->shared | globals->pic_veneer)
3786 ? (r_type == R_ARM_THM_TLS_CALL
3787 /* TLS PIC stubs. */
3788 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3789 : arm_stub_long_branch_v4t_thumb_tls_pic)
3790 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3791 /* V5T PIC and above. */
3792 ? arm_stub_long_branch_any_arm_pic
3794 : arm_stub_long_branch_v4t_thumb_arm_pic))
3796 /* non-PIC stubs. */
3797 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3798 /* V5T and above. */
3799 ? arm_stub_long_branch_any_any
3801 : arm_stub_long_branch_v4t_thumb_arm);
3803 /* Handle v4t short branches. */
3804 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3805 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3806 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3807 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3811 else if (r_type == R_ARM_CALL
3812 || r_type == R_ARM_JUMP24
3813 || r_type == R_ARM_PLT32
3814 || r_type == R_ARM_TLS_CALL)
3816 if (branch_type == ST_BRANCH_TO_THUMB)
3821 && sym_sec->owner != NULL
3822 && !INTERWORK_FLAG (sym_sec->owner))
3824 (*_bfd_error_handler)
3825 (_("%B(%s): warning: interworking not enabled.\n"
3826 " first occurrence: %B: ARM call to Thumb"),
3827 sym_sec->owner, input_bfd, name);
3830 /* We have an extra 2 bytes of reach because of
3831 the mode change (bit 24 (H) of the BLX encoding). */
3832 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3833 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3834 || (r_type == R_ARM_CALL && !globals->use_blx)
3835 || (r_type == R_ARM_JUMP24)
3836 || (r_type == R_ARM_PLT32))
3838 stub_type = (info->shared | globals->pic_veneer)
3840 ? ((globals->use_blx)
3841 /* V5T and above. */
3842 ? arm_stub_long_branch_any_thumb_pic
3844 : arm_stub_long_branch_v4t_arm_thumb_pic)
3846 /* non-PIC stubs. */
3847 : ((globals->use_blx)
3848 /* V5T and above. */
3849 ? arm_stub_long_branch_any_any
3851 : arm_stub_long_branch_v4t_arm_thumb);
3857 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3858 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3861 (info->shared | globals->pic_veneer)
3863 ? (r_type == R_ARM_TLS_CALL
3865 ? arm_stub_long_branch_any_tls_pic
3867 ? arm_stub_long_branch_arm_nacl_pic
3868 : arm_stub_long_branch_any_arm_pic))
3869 /* non-PIC stubs. */
3871 ? arm_stub_long_branch_arm_nacl
3872 : arm_stub_long_branch_any_any);
3877 /* If a stub is needed, record the actual destination type. */
3878 if (stub_type != arm_stub_none)
3879 *actual_branch_type = branch_type;
3884 /* Build a name for an entry in the stub hash table. */
3887 elf32_arm_stub_name (const asection *input_section,
3888 const asection *sym_sec,
3889 const struct elf32_arm_link_hash_entry *hash,
3890 const Elf_Internal_Rela *rel,
3891 enum elf32_arm_stub_type stub_type)
3898 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3899 stub_name = (char *) bfd_malloc (len);
3900 if (stub_name != NULL)
3901 sprintf (stub_name, "%08x_%s+%x_%d",
3902 input_section->id & 0xffffffff,
3903 hash->root.root.root.string,
3904 (int) rel->r_addend & 0xffffffff,
3909 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3910 stub_name = (char *) bfd_malloc (len);
3911 if (stub_name != NULL)
3912 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3913 input_section->id & 0xffffffff,
3914 sym_sec->id & 0xffffffff,
3915 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
3916 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
3917 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3918 (int) rel->r_addend & 0xffffffff,
3925 /* Look up an entry in the stub hash. Stub entries are cached because
3926 creating the stub name takes a bit of time. */
3928 static struct elf32_arm_stub_hash_entry *
3929 elf32_arm_get_stub_entry (const asection *input_section,
3930 const asection *sym_sec,
3931 struct elf_link_hash_entry *hash,
3932 const Elf_Internal_Rela *rel,
3933 struct elf32_arm_link_hash_table *htab,
3934 enum elf32_arm_stub_type stub_type)
3936 struct elf32_arm_stub_hash_entry *stub_entry;
3937 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3938 const asection *id_sec;
3940 if ((input_section->flags & SEC_CODE) == 0)
3943 /* If this input section is part of a group of sections sharing one
3944 stub section, then use the id of the first section in the group.
3945 Stub names need to include a section id, as there may well be
3946 more than one stub used to reach, say, printf, and we need to
3947 distinguish between them. */
3948 id_sec = htab->stub_group[input_section->id].link_sec;
3950 if (h != NULL && h->stub_cache != NULL
3951 && h->stub_cache->h == h
3952 && h->stub_cache->id_sec == id_sec
3953 && h->stub_cache->stub_type == stub_type)
3955 stub_entry = h->stub_cache;
3961 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3962 if (stub_name == NULL)
3965 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3966 stub_name, FALSE, FALSE);
3968 h->stub_cache = stub_entry;
3976 /* Find or create a stub section. Returns a pointer to the stub section, and
3977 the section to which the stub section will be attached (in *LINK_SEC_P).
3978 LINK_SEC_P may be NULL. */
3981 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3982 struct elf32_arm_link_hash_table *htab)
3987 link_sec = htab->stub_group[section->id].link_sec;
3988 BFD_ASSERT (link_sec != NULL);
3989 stub_sec = htab->stub_group[section->id].stub_sec;
3991 if (stub_sec == NULL)
3993 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3994 if (stub_sec == NULL)
4000 namelen = strlen (link_sec->name);
4001 len = namelen + sizeof (STUB_SUFFIX);
4002 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4006 memcpy (s_name, link_sec->name, namelen);
4007 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4008 stub_sec = (*htab->add_stub_section) (s_name, link_sec,
4009 htab->nacl_p ? 4 : 3);
4010 if (stub_sec == NULL)
4012 htab->stub_group[link_sec->id].stub_sec = stub_sec;
4014 htab->stub_group[section->id].stub_sec = stub_sec;
4018 *link_sec_p = link_sec;
4023 /* Add a new stub entry to the stub hash. Not all fields of the new
4024 stub entry are initialised. */
4026 static struct elf32_arm_stub_hash_entry *
4027 elf32_arm_add_stub (const char *stub_name,
4029 struct elf32_arm_link_hash_table *htab)
4033 struct elf32_arm_stub_hash_entry *stub_entry;
4035 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
4036 if (stub_sec == NULL)
4039 /* Enter this entry into the linker stub hash table. */
4040 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4042 if (stub_entry == NULL)
4044 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4050 stub_entry->stub_sec = stub_sec;
4051 stub_entry->stub_offset = 0;
4052 stub_entry->id_sec = link_sec;
4057 /* Store an Arm insn into an output section not processed by
4058 elf32_arm_write_section. */
4061 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4062 bfd * output_bfd, bfd_vma val, void * ptr)
4064 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4065 bfd_putl32 (val, ptr);
4067 bfd_putb32 (val, ptr);
4070 /* Store a 16-bit Thumb insn into an output section not processed by
4071 elf32_arm_write_section. */
4074 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4075 bfd * output_bfd, bfd_vma val, void * ptr)
4077 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4078 bfd_putl16 (val, ptr);
4080 bfd_putb16 (val, ptr);
4083 /* If it's possible to change R_TYPE to a more efficient access
4084 model, return the new reloc type. */
4087 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4088 struct elf_link_hash_entry *h)
4090 int is_local = (h == NULL);
4092 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
4095 /* We do not support relaxations for Old TLS models. */
4098 case R_ARM_TLS_GOTDESC:
4099 case R_ARM_TLS_CALL:
4100 case R_ARM_THM_TLS_CALL:
4101 case R_ARM_TLS_DESCSEQ:
4102 case R_ARM_THM_TLS_DESCSEQ:
4103 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4109 static bfd_reloc_status_type elf32_arm_final_link_relocate
4110 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4111 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4112 const char *, unsigned char, enum arm_st_branch_type,
4113 struct elf_link_hash_entry *, bfd_boolean *, char **);
4116 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4120 case arm_stub_a8_veneer_b_cond:
4121 case arm_stub_a8_veneer_b:
4122 case arm_stub_a8_veneer_bl:
4125 case arm_stub_long_branch_any_any:
4126 case arm_stub_long_branch_v4t_arm_thumb:
4127 case arm_stub_long_branch_thumb_only:
4128 case arm_stub_long_branch_v4t_thumb_thumb:
4129 case arm_stub_long_branch_v4t_thumb_arm:
4130 case arm_stub_short_branch_v4t_thumb_arm:
4131 case arm_stub_long_branch_any_arm_pic:
4132 case arm_stub_long_branch_any_thumb_pic:
4133 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4134 case arm_stub_long_branch_v4t_arm_thumb_pic:
4135 case arm_stub_long_branch_v4t_thumb_arm_pic:
4136 case arm_stub_long_branch_thumb_only_pic:
4137 case arm_stub_long_branch_any_tls_pic:
4138 case arm_stub_long_branch_v4t_thumb_tls_pic:
4139 case arm_stub_a8_veneer_blx:
4142 case arm_stub_long_branch_arm_nacl:
4143 case arm_stub_long_branch_arm_nacl_pic:
4147 abort (); /* Should be unreachable. */
4152 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4156 struct elf32_arm_stub_hash_entry *stub_entry;
4157 struct elf32_arm_link_hash_table *globals;
4158 struct bfd_link_info *info;
4165 const insn_sequence *template_sequence;
4167 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4168 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4171 /* Massage our args to the form they really have. */
4172 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4173 info = (struct bfd_link_info *) in_arg;
4175 globals = elf32_arm_hash_table (info);
4176 if (globals == NULL)
4179 stub_sec = stub_entry->stub_sec;
4181 if ((globals->fix_cortex_a8 < 0)
4182 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4183 /* We have to do less-strictly-aligned fixes last. */
4186 /* Make a note of the offset within the stubs for this entry. */
4187 stub_entry->stub_offset = stub_sec->size;
4188 loc = stub_sec->contents + stub_entry->stub_offset;
4190 stub_bfd = stub_sec->owner;
4192 /* This is the address of the stub destination. */
4193 sym_value = (stub_entry->target_value
4194 + stub_entry->target_section->output_offset
4195 + stub_entry->target_section->output_section->vma);
4197 template_sequence = stub_entry->stub_template;
4198 template_size = stub_entry->stub_template_size;
4201 for (i = 0; i < template_size; i++)
4203 switch (template_sequence[i].type)
4207 bfd_vma data = (bfd_vma) template_sequence[i].data;
4208 if (template_sequence[i].reloc_addend != 0)
4210 /* We've borrowed the reloc_addend field to mean we should
4211 insert a condition code into this (Thumb-1 branch)
4212 instruction. See THUMB16_BCOND_INSN. */
4213 BFD_ASSERT ((data & 0xff00) == 0xd000);
4214 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4216 bfd_put_16 (stub_bfd, data, loc + size);
4222 bfd_put_16 (stub_bfd,
4223 (template_sequence[i].data >> 16) & 0xffff,
4225 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4227 if (template_sequence[i].r_type != R_ARM_NONE)
4229 stub_reloc_idx[nrelocs] = i;
4230 stub_reloc_offset[nrelocs++] = size;
4236 bfd_put_32 (stub_bfd, template_sequence[i].data,
4238 /* Handle cases where the target is encoded within the instruction.  */
4240 if (template_sequence[i].r_type == R_ARM_JUMP24)
4242 stub_reloc_idx[nrelocs] = i;
4243 stub_reloc_offset[nrelocs++] = size;
4249 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4250 stub_reloc_idx[nrelocs] = i;
4251 stub_reloc_offset[nrelocs++] = size;
4261 stub_sec->size += size;
4263 /* Stub size has already been computed in arm_size_one_stub.  Check consistency.  */
4265 BFD_ASSERT (size == stub_entry->stub_size);
4267 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4268 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4271 /* Assume there are between one and MAXRELOCS entries to relocate in each stub.  */
4273 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4275 for (i = 0; i < nrelocs; i++)
4276 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
4277 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
4278 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
4279 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
4281 Elf_Internal_Rela rel;
4282 bfd_boolean unresolved_reloc;
4283 char *error_message;
4284 enum arm_st_branch_type branch_type
4285 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
4286 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
4287 bfd_vma points_to = sym_value + stub_entry->target_addend;
4289 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4290 rel.r_info = ELF32_R_INFO (0,
4291 template_sequence[stub_reloc_idx[i]].r_type);
4292 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
4294 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4295 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4296 template should refer back to the instruction after the original branch.  */
4298 points_to = sym_value;
4300 /* There may be unintended consequences if this is not true. */
4301 BFD_ASSERT (stub_entry->h == NULL);
4303 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4304 properly. We should probably use this function unconditionally,
4305 rather than only for certain relocations listed in the enclosing
4306 conditional, for the sake of consistency. */
4307 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4308 (template_sequence[stub_reloc_idx[i]].r_type),
4309 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4310 points_to, info, stub_entry->target_section, "", STT_FUNC,
4311 branch_type, (struct elf_link_hash_entry *) stub_entry->h,
4312 &unresolved_reloc, &error_message);
4316 Elf_Internal_Rela rel;
4317 bfd_boolean unresolved_reloc;
4318 char *error_message;
4319 bfd_vma points_to = sym_value + stub_entry->target_addend
4320 + template_sequence[stub_reloc_idx[i]].reloc_addend;
4322 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4323 rel.r_info = ELF32_R_INFO (0,
4324 template_sequence[stub_reloc_idx[i]].r_type);
4327 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4328 (template_sequence[stub_reloc_idx[i]].r_type),
4329 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4330 points_to, info, stub_entry->target_section, "", STT_FUNC,
4331 stub_entry->branch_type,
4332 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4340 /* Calculate the template, template size and instruction size for a stub.
4341 Return value is the instruction size. */
4344 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4345 const insn_sequence **stub_template,
4346 int *stub_template_size)
4348 const insn_sequence *template_sequence = NULL;
4349 int template_size = 0, i;
4352 template_sequence = stub_definitions[stub_type].template_sequence;
4354 *stub_template = template_sequence;
4356 template_size = stub_definitions[stub_type].template_size;
4357 if (stub_template_size)
4358 *stub_template_size = template_size;
4361 for (i = 0; i < template_size; i++)
4363 switch (template_sequence[i].type)
4384 /* As above, but don't actually build the stub. Just bump offset so
4385 we know stub section sizes. */
4388 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4389 void *in_arg ATTRIBUTE_UNUSED)
4391 struct elf32_arm_stub_hash_entry *stub_entry;
4392 const insn_sequence *template_sequence;
4393 int template_size, size;
4395 /* Massage our args to the form they really have. */
4396 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4398 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4399 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4401 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4404 stub_entry->stub_size = size;
4405 stub_entry->stub_template = template_sequence;
4406 stub_entry->stub_template_size = template_size;
4408 size = (size + 7) & ~7;
4409 stub_entry->stub_sec->size += size;
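/* Sizing note (illustrative): the (size + 7) & ~7 rounding above pads
   every stub to a multiple of 8 bytes, so e.g. a 12-byte long-branch
   stub reserves 16 bytes of stub section space.  */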
4414 /* External entry points for sizing and building linker stubs. */
4416 /* Set up various things so that we can make a list of input sections
4417 for each output section included in the link. Returns -1 on error,
4418 0 when no stubs will be needed, and 1 on success. */
4421 elf32_arm_setup_section_lists (bfd *output_bfd,
4422 struct bfd_link_info *info)
4425 unsigned int bfd_count;
4426 int top_id, top_index;
4428 asection **input_list, **list;
4430 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4434 if (! is_elf_hash_table (htab))
4437 /* Count the number of input BFDs and find the top input section id. */
4438 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4440 input_bfd = input_bfd->link.next)
4443 for (section = input_bfd->sections;
4445 section = section->next)
4447 if (top_id < section->id)
4448 top_id = section->id;
4451 htab->bfd_count = bfd_count;
4453 amt = sizeof (struct map_stub) * (top_id + 1);
4454 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4455 if (htab->stub_group == NULL)
4457 htab->top_id = top_id;
4459 /* We can't use output_bfd->section_count here to find the top output
4460 section index as some sections may have been removed, and
4461 _bfd_strip_section_from_output doesn't renumber the indices. */
4462 for (section = output_bfd->sections, top_index = 0;
4464 section = section->next)
4466 if (top_index < section->index)
4467 top_index = section->index;
4470 htab->top_index = top_index;
4471 amt = sizeof (asection *) * (top_index + 1);
4472 input_list = (asection **) bfd_malloc (amt);
4473 htab->input_list = input_list;
4474 if (input_list == NULL)
4477 /* For sections we aren't interested in, mark their entries with a
4478 value we can check later. */
4479 list = input_list + top_index;
4481 *list = bfd_abs_section_ptr;
4482 while (list-- != input_list);
4484 for (section = output_bfd->sections;
4486 section = section->next)
4488 if ((section->flags & SEC_CODE) != 0)
4489 input_list[section->index] = NULL;
4495 /* The linker repeatedly calls this function for each input section,
4496 in the order that input sections are linked into output sections.
4497 Build lists of input sections to determine groupings between which
4498 we may insert linker stubs. */
4501 elf32_arm_next_input_section (struct bfd_link_info *info,
4504 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4509 if (isec->output_section->index <= htab->top_index)
4511 asection **list = htab->input_list + isec->output_section->index;
4513 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4515 /* Steal the link_sec pointer for our list. */
4516 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4517 /* This happens to make the list in reverse order,
4518 which we reverse later. */
4519 PREV_SEC (isec) = *list;
4525 /* See whether we can group stub sections together. Grouping stub
4526 sections may result in fewer stubs. More importantly, we need to
4527 put all .init* and .fini* stubs at the end of the .init or
4528 .fini output sections respectively, because glibc splits the
4529 _init and _fini functions into multiple parts. Putting a stub in
4530 the middle of a function is not a good idea. */
4533 group_sections (struct elf32_arm_link_hash_table *htab,
4534 bfd_size_type stub_group_size,
4535 bfd_boolean stubs_always_after_branch)
4537 asection **list = htab->input_list;
4541 asection *tail = *list;
4544 if (tail == bfd_abs_section_ptr)
4547 /* Reverse the list: we must avoid placing stubs at the
4548 beginning of the section because the beginning of the text
4549 section may be required for an interrupt vector in bare metal code.  */
4551 #define NEXT_SEC PREV_SEC
4553 while (tail != NULL)
4555 /* Pop from tail. */
4556 asection *item = tail;
4557 tail = PREV_SEC (item);
4560 NEXT_SEC (item) = head;
4564 while (head != NULL)
4568 bfd_vma stub_group_start = head->output_offset;
4569 bfd_vma end_of_next;
4572 while (NEXT_SEC (curr) != NULL)
4574 next = NEXT_SEC (curr);
4575 end_of_next = next->output_offset + next->size;
4576 if (end_of_next - stub_group_start >= stub_group_size)
4577 /* End of NEXT is too far from start, so stop. */
4579 /* Add NEXT to the group. */
4583 /* OK, the size from the start to the start of CURR is less
4584 than stub_group_size and thus can be handled by one stub
4585 section. (Or the head section is itself larger than
4586 stub_group_size, in which case we may be toast.)
4587 We should really be keeping track of the total size of
4588 stubs added here, as stubs contribute to the final output section size.  */
4592 next = NEXT_SEC (head);
4593 /* Set up this stub group. */
4594 htab->stub_group[head->id].link_sec = curr;
4596 while (head != curr && (head = next) != NULL);
4598 /* But wait, there's more! Input sections up to stub_group_size
4599 bytes after the stub section can be handled by it too. */
4600 if (!stubs_always_after_branch)
4602 stub_group_start = curr->output_offset + curr->size;
4604 while (next != NULL)
4606 end_of_next = next->output_offset + next->size;
4607 if (end_of_next - stub_group_start >= stub_group_size)
4608 /* End of NEXT is too far from stubs, so stop. */
4610 /* Add NEXT to the stub group. */
4612 next = NEXT_SEC (head);
4613 htab->stub_group[head->id].link_sec = curr;
4619 while (list++ != htab->input_list + htab->top_index);
4621 free (htab->input_list);
4626 /* Comparison function for sorting/searching relocations relating to Cortex-A8 erratum workarounds.  */
4630 a8_reloc_compare (const void *a, const void *b)
4632 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4633 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4635 if (ra->from < rb->from)
4637 else if (ra->from > rb->from)
4643 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4644 const char *, char **);
4646 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4647 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4648 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false otherwise.  */
4652 cortex_a8_erratum_scan (bfd *input_bfd,
4653 struct bfd_link_info *info,
4654 struct a8_erratum_fix **a8_fixes_p,
4655 unsigned int *num_a8_fixes_p,
4656 unsigned int *a8_fix_table_size_p,
4657 struct a8_erratum_reloc *a8_relocs,
4658 unsigned int num_a8_relocs,
4659 unsigned prev_num_a8_fixes,
4660 bfd_boolean *stub_changed_p)
4663 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4664 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4665 unsigned int num_a8_fixes = *num_a8_fixes_p;
4666 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4671 for (section = input_bfd->sections;
4673 section = section->next)
4675 bfd_byte *contents = NULL;
4676 struct _arm_elf_section_data *sec_data;
4680 if (elf_section_type (section) != SHT_PROGBITS
4681 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4682 || (section->flags & SEC_EXCLUDE) != 0
4683 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4684 || (section->output_section == bfd_abs_section_ptr))
4687 base_vma = section->output_section->vma + section->output_offset;
4689 if (elf_section_data (section)->this_hdr.contents != NULL)
4690 contents = elf_section_data (section)->this_hdr.contents;
4691 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4694 sec_data = elf32_arm_section_data (section);
4696 for (span = 0; span < sec_data->mapcount; span++)
4698 unsigned int span_start = sec_data->map[span].vma;
4699 unsigned int span_end = (span == sec_data->mapcount - 1)
4700 ? section->size : sec_data->map[span + 1].vma;
4702 char span_type = sec_data->map[span].type;
4703 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4705 if (span_type != 't')
4708 /* Span is entirely within a single 4KB region: skip scanning. */
4709 if (((base_vma + span_start) & ~0xfff)
4710 == ((base_vma + span_end) & ~0xfff))
4713 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4715 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4716 * The branch target is in the same 4KB region as the
4717 first half of the branch.
4718 * The instruction before the branch is a 32-bit
4719 length non-branch instruction. */
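/* Worked example (illustrative): if base_vma + i == 0x8ffe, the first
   halfword of a 32-bit branch occupies 0x8ffe-0x8fff and the second
   halfword 0x9000-0x9001, so the instruction straddles two 4KB regions;
   this is exactly what the ((base_vma + i) & 0xfff) == 0xffe test below
   detects.  A fix is only considered when the branch target lies back in
   the same 4KB region as the first halfword.  */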
4720 for (i = span_start; i < span_end;)
4722 unsigned int insn = bfd_getl16 (&contents[i]);
4723 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4724 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4726 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4731 /* Load the rest of the insn (in manual-friendly order). */
4732 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4734 /* Encoding T4: B<c>.W. */
4735 is_b = (insn & 0xf800d000) == 0xf0009000;
4736 /* Encoding T1: BL<c>.W. */
4737 is_bl = (insn & 0xf800d000) == 0xf000d000;
4738 /* Encoding T2: BLX<c>.W. */
4739 is_blx = (insn & 0xf800d000) == 0xf000c000;
4740 /* Encoding T3: B<c>.W (not permitted in IT block). */
4741 is_bcc = (insn & 0xf800d000) == 0xf0008000
4742 && (insn & 0x07f00000) != 0x03800000;
4745 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4747 if (((base_vma + i) & 0xfff) == 0xffe
4751 && ! last_was_branch)
4753 bfd_signed_vma offset = 0;
4754 bfd_boolean force_target_arm = FALSE;
4755 bfd_boolean force_target_thumb = FALSE;
4757 enum elf32_arm_stub_type stub_type = arm_stub_none;
4758 struct a8_erratum_reloc key, *found;
4759 bfd_boolean use_plt = FALSE;
4761 key.from = base_vma + i;
4762 found = (struct a8_erratum_reloc *)
4763 bsearch (&key, a8_relocs, num_a8_relocs,
4764 sizeof (struct a8_erratum_reloc),
4769 char *error_message = NULL;
4770 struct elf_link_hash_entry *entry;
4772 /* We don't care about the error returned from this
4773 function, only if there is glue or not. */
4774 entry = find_thumb_glue (info, found->sym_name,
4778 found->non_a8_stub = TRUE;
4780 /* Keep a simpler condition, for the sake of clarity. */
4781 if (htab->root.splt != NULL && found->hash != NULL
4782 && found->hash->root.plt.offset != (bfd_vma) -1)
4785 if (found->r_type == R_ARM_THM_CALL)
4787 if (found->branch_type == ST_BRANCH_TO_ARM
4789 force_target_arm = TRUE;
4791 force_target_thumb = TRUE;
4795 /* Check if we have an offending branch instruction. */
4797 if (found && found->non_a8_stub)
4798 /* We've already made a stub for this instruction, e.g.
4799 it's a long branch or a Thumb->ARM stub. Assume that
4800 stub will suffice to work around the A8 erratum (see
4801 setting of always_after_branch above). */
4805 offset = (insn & 0x7ff) << 1;
4806 offset |= (insn & 0x3f0000) >> 4;
4807 offset |= (insn & 0x2000) ? 0x40000 : 0;
4808 offset |= (insn & 0x800) ? 0x80000 : 0;
4809 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4810 if (offset & 0x100000)
4811 offset |= ~ ((bfd_signed_vma) 0xfffff);
4812 stub_type = arm_stub_a8_veneer_b_cond;
4814 else if (is_b || is_bl || is_blx)
4816 int s = (insn & 0x4000000) != 0;
4817 int j1 = (insn & 0x2000) != 0;
4818 int j2 = (insn & 0x800) != 0;
4822 offset = (insn & 0x7ff) << 1;
4823 offset |= (insn & 0x3ff0000) >> 4;
4827 if (offset & 0x1000000)
4828 offset |= ~ ((bfd_signed_vma) 0xffffff);
4831 offset &= ~ ((bfd_signed_vma) 3);
4833 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4834 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
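/* Decoding sketch (informational): in the 32-bit B/BL/BLX encodings the
   branch offset is S:I1:I2:imm10:imm11:'0', with I1 = NOT(J1 XOR S) and
   I2 = NOT(J2 XOR S).  The code above extracts S, J1 and J2, builds bits
   1-21 of the offset from imm11 and imm10, ORs I2, I1 and S into bits
   22, 23 and 24, and sign-extends from bit 24; a BLX target is
   additionally rounded down to a multiple of 4, since it always lands in
   ARM code.  */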
4837 if (stub_type != arm_stub_none)
4839 bfd_vma pc_for_insn = base_vma + i + 4;
4841 /* The original instruction is a BL, but the target is
4842 an ARM instruction. If we were not making a stub,
4843 the BL would have been converted to a BLX. Use the
4844 BLX stub instead in that case. */
4845 if (htab->use_blx && force_target_arm
4846 && stub_type == arm_stub_a8_veneer_bl)
4848 stub_type = arm_stub_a8_veneer_blx;
4852 /* Conversely, if the original instruction was
4853 BLX but the target is Thumb mode, use the BL stub instead.  */
4855 else if (force_target_thumb
4856 && stub_type == arm_stub_a8_veneer_blx)
4858 stub_type = arm_stub_a8_veneer_bl;
4864 pc_for_insn &= ~ ((bfd_vma) 3);
4866 /* If we found a relocation, use the proper destination,
4867 not the offset in the (unrelocated) instruction.
4868 Note this is always done if we switched the stub type above.  */
4872 (bfd_signed_vma) (found->destination - pc_for_insn);
4874 /* If the stub will use a Thumb-mode branch to a
4875 PLT target, redirect it to the preceding Thumb entry point.  */
4877 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
4878 offset -= PLT_THUMB_STUB_SIZE;
4880 target = pc_for_insn + offset;
4882 /* The BLX stub is ARM-mode code. Adjust the offset to
4883 take the different PC value (+8 instead of +4) into account.  */
4885 if (stub_type == arm_stub_a8_veneer_blx)
4888 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4890 char *stub_name = NULL;
4892 if (num_a8_fixes == a8_fix_table_size)
4894 a8_fix_table_size *= 2;
4895 a8_fixes = (struct a8_erratum_fix *)
4896 bfd_realloc (a8_fixes,
4897 sizeof (struct a8_erratum_fix)
4898 * a8_fix_table_size);
4901 if (num_a8_fixes < prev_num_a8_fixes)
4903 /* If we're doing a subsequent scan,
4904 check if we've found the same fix as
4905 before, and try to reuse the stub name.  */
4907 stub_name = a8_fixes[num_a8_fixes].stub_name;
4908 if ((a8_fixes[num_a8_fixes].section != section)
4909 || (a8_fixes[num_a8_fixes].offset != i))
4913 *stub_changed_p = TRUE;
4919 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4920 if (stub_name != NULL)
4921 sprintf (stub_name, "%x:%x", section->id, i);
4924 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4925 a8_fixes[num_a8_fixes].section = section;
4926 a8_fixes[num_a8_fixes].offset = i;
4927 a8_fixes[num_a8_fixes].addend = offset;
4928 a8_fixes[num_a8_fixes].orig_insn = insn;
4929 a8_fixes[num_a8_fixes].stub_name = stub_name;
4930 a8_fixes[num_a8_fixes].stub_type = stub_type;
4931 a8_fixes[num_a8_fixes].branch_type =
4932 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
4939 i += insn_32bit ? 4 : 2;
4940 last_was_32bit = insn_32bit;
4941 last_was_branch = is_32bit_branch;
4945 if (elf_section_data (section)->this_hdr.contents == NULL)
4949 *a8_fixes_p = a8_fixes;
4950 *num_a8_fixes_p = num_a8_fixes;
4951 *a8_fix_table_size_p = a8_fix_table_size;
4956 /* Determine and set the size of the stub section for a final link.
4958 The basic idea here is to examine all the relocations looking for
4959 PC-relative calls to a target that is unreachable with a "bl" instruction.  */
4963 elf32_arm_size_stubs (bfd *output_bfd,
4965 struct bfd_link_info *info,
4966 bfd_signed_vma group_size,
4967 asection * (*add_stub_section) (const char *, asection *,
4969 void (*layout_sections_again) (void))
4971 bfd_size_type stub_group_size;
4972 bfd_boolean stubs_always_after_branch;
4973 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4974 struct a8_erratum_fix *a8_fixes = NULL;
4975 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4976 struct a8_erratum_reloc *a8_relocs = NULL;
4977 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4982 if (htab->fix_cortex_a8)
4984 a8_fixes = (struct a8_erratum_fix *)
4985 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4986 a8_relocs = (struct a8_erratum_reloc *)
4987 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4990 /* Propagate mach to stub bfd, because it may not have been
4991 finalized when we created stub_bfd. */
4992 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4993 bfd_get_mach (output_bfd));
4995 /* Stash our params away. */
4996 htab->stub_bfd = stub_bfd;
4997 htab->add_stub_section = add_stub_section;
4998 htab->layout_sections_again = layout_sections_again;
4999 stubs_always_after_branch = group_size < 0;
5001 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5002 as the first half of a 32-bit branch straddling two 4K pages. This is a
5003 crude way of enforcing that. */
5004 if (htab->fix_cortex_a8)
5005 stubs_always_after_branch = 1;
5008 stub_group_size = -group_size;
5010 stub_group_size = group_size;
5012 if (stub_group_size == 1)
5014 /* Default values. */
5015 /* The Thumb branch range of +/-4MB has to be used as the default
5016 maximum size (a given section can contain both ARM and Thumb
5017 code, so the worst case has to be taken into account).
5019 This value is 24K less than that, which allows for 2025
5020 12-byte stubs. If we exceed that, then we will fail to link.
5021 The user will have to relink with an explicit group size option.  */
5023 stub_group_size = 4170000;
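/* Arithmetic check (illustrative): 4MB is 4194304 bytes, and
   4194304 - 4170000 = 24304 bytes of headroom, i.e. room for
   24304 / 12 = 2025 twelve-byte stubs before a Thumb branch into the
   stub section could fall out of range.  */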
5026 group_sections (htab, stub_group_size, stubs_always_after_branch);
5028 /* If we're applying the cortex A8 fix, we need to determine the
5029 program header size now, because we cannot change it later --
5030 that could alter section placements. Notice the A8 erratum fix
5031 ends up requiring the section addresses to remain unchanged
5032 modulo the page size. That's something we cannot represent
5033 inside BFD, and we don't want to force the section alignment to
5034 be the page size. */
5035 if (htab->fix_cortex_a8)
5036 (*htab->layout_sections_again) ();
5041 unsigned int bfd_indx;
5043 bfd_boolean stub_changed = FALSE;
5044 unsigned prev_num_a8_fixes = num_a8_fixes;
5047 for (input_bfd = info->input_bfds, bfd_indx = 0;
5049 input_bfd = input_bfd->link.next, bfd_indx++)
5051 Elf_Internal_Shdr *symtab_hdr;
5053 Elf_Internal_Sym *local_syms = NULL;
5055 if (!is_arm_elf (input_bfd))
5060 /* We'll need the symbol table in a second. */
5061 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5062 if (symtab_hdr->sh_info == 0)
5065 /* Walk over each section attached to the input bfd. */
5066 for (section = input_bfd->sections;
5068 section = section->next)
5070 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5072 /* If there aren't any relocs, then there's nothing more to do.  */
5074 if ((section->flags & SEC_RELOC) == 0
5075 || section->reloc_count == 0
5076 || (section->flags & SEC_CODE) == 0)
5079 /* If this section is a link-once section that will be
5080 discarded, then don't create any stubs. */
5081 if (section->output_section == NULL
5082 || section->output_section->owner != output_bfd)
5085 /* Get the relocs. */
5087 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5088 NULL, info->keep_memory);
5089 if (internal_relocs == NULL)
5090 goto error_ret_free_local;
5092 /* Now examine each relocation. */
5093 irela = internal_relocs;
5094 irelaend = irela + section->reloc_count;
5095 for (; irela < irelaend; irela++)
5097 unsigned int r_type, r_indx;
5098 enum elf32_arm_stub_type stub_type;
5099 struct elf32_arm_stub_hash_entry *stub_entry;
5102 bfd_vma destination;
5103 struct elf32_arm_link_hash_entry *hash;
5104 const char *sym_name;
5106 const asection *id_sec;
5107 unsigned char st_type;
5108 enum arm_st_branch_type branch_type;
5109 bfd_boolean created_stub = FALSE;
5111 r_type = ELF32_R_TYPE (irela->r_info);
5112 r_indx = ELF32_R_SYM (irela->r_info);
5114 if (r_type >= (unsigned int) R_ARM_max)
5116 bfd_set_error (bfd_error_bad_value);
5117 error_ret_free_internal:
5118 if (elf_section_data (section)->relocs == NULL)
5119 free (internal_relocs);
5120 goto error_ret_free_local;
5124 if (r_indx >= symtab_hdr->sh_info)
5125 hash = elf32_arm_hash_entry
5126 (elf_sym_hashes (input_bfd)
5127 [r_indx - symtab_hdr->sh_info]);
5129 /* Only look for stubs on branch instructions, or
5130 non-relaxed TLSCALL.  */
5131 if ((r_type != (unsigned int) R_ARM_CALL)
5132 && (r_type != (unsigned int) R_ARM_THM_CALL)
5133 && (r_type != (unsigned int) R_ARM_JUMP24)
5134 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5135 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5136 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5137 && (r_type != (unsigned int) R_ARM_PLT32)
5138 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5139 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5140 && r_type == elf32_arm_tls_transition
5141 (info, r_type, &hash->root)
5142 && ((hash ? hash->tls_type
5143 : (elf32_arm_local_got_tls_type
5144 (input_bfd)[r_indx]))
5145 & GOT_TLS_GDESC) != 0))
5148 /* Now determine the call target, its name, value, section.  */
5155 if (r_type == (unsigned int) R_ARM_TLS_CALL
5156 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5158 /* A non-relaxed TLS call. The target is the
5159 plt-resident trampoline and nothing to do with it.  */
5161 BFD_ASSERT (htab->tls_trampoline > 0);
5162 sym_sec = htab->root.splt;
5163 sym_value = htab->tls_trampoline;
5166 branch_type = ST_BRANCH_TO_ARM;
5170 /* It's a local symbol. */
5171 Elf_Internal_Sym *sym;
5173 if (local_syms == NULL)
5176 = (Elf_Internal_Sym *) symtab_hdr->contents;
5177 if (local_syms == NULL)
5179 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5180 symtab_hdr->sh_info, 0,
5182 if (local_syms == NULL)
5183 goto error_ret_free_internal;
5186 sym = local_syms + r_indx;
5187 if (sym->st_shndx == SHN_UNDEF)
5188 sym_sec = bfd_und_section_ptr;
5189 else if (sym->st_shndx == SHN_ABS)
5190 sym_sec = bfd_abs_section_ptr;
5191 else if (sym->st_shndx == SHN_COMMON)
5192 sym_sec = bfd_com_section_ptr;
5195 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5198 /* This is an undefined symbol.  It can never be resolved.  */
5202 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5203 sym_value = sym->st_value;
5204 destination = (sym_value + irela->r_addend
5205 + sym_sec->output_offset
5206 + sym_sec->output_section->vma);
5207 st_type = ELF_ST_TYPE (sym->st_info);
5208 branch_type = ARM_SYM_BRANCH_TYPE (sym);
5210 = bfd_elf_string_from_elf_section (input_bfd,
5211 symtab_hdr->sh_link,
5216 /* It's an external symbol. */
5217 while (hash->root.root.type == bfd_link_hash_indirect
5218 || hash->root.root.type == bfd_link_hash_warning)
5219 hash = ((struct elf32_arm_link_hash_entry *)
5220 hash->root.root.u.i.link);
5222 if (hash->root.root.type == bfd_link_hash_defined
5223 || hash->root.root.type == bfd_link_hash_defweak)
5225 sym_sec = hash->root.root.u.def.section;
5226 sym_value = hash->root.root.u.def.value;
5228 struct elf32_arm_link_hash_table *globals =
5229 elf32_arm_hash_table (info);
5231 /* For a destination in a shared library,
5232 use the PLT stub as target address to
5233 decide whether a branch stub is needed.  */
5236 && globals->root.splt != NULL
5238 && hash->root.plt.offset != (bfd_vma) -1)
5240 sym_sec = globals->root.splt;
5241 sym_value = hash->root.plt.offset;
5242 if (sym_sec->output_section != NULL)
5243 destination = (sym_value
5244 + sym_sec->output_offset
5245 + sym_sec->output_section->vma);
5247 else if (sym_sec->output_section != NULL)
5248 destination = (sym_value + irela->r_addend
5249 + sym_sec->output_offset
5250 + sym_sec->output_section->vma);
5252 else if ((hash->root.root.type == bfd_link_hash_undefined)
5253 || (hash->root.root.type == bfd_link_hash_undefweak))
5255 /* For a shared library, use the PLT stub as
5256 target address to decide whether a long
5257 branch stub is needed.
5258 For absolute code, they cannot be handled. */
5259 struct elf32_arm_link_hash_table *globals =
5260 elf32_arm_hash_table (info);
5263 && globals->root.splt != NULL
5265 && hash->root.plt.offset != (bfd_vma) -1)
5267 sym_sec = globals->root.splt;
5268 sym_value = hash->root.plt.offset;
5269 if (sym_sec->output_section != NULL)
5270 destination = (sym_value
5271 + sym_sec->output_offset
5272 + sym_sec->output_section->vma);
5279 bfd_set_error (bfd_error_bad_value);
5280 goto error_ret_free_internal;
5282 st_type = hash->root.type;
5283 branch_type = hash->root.target_internal;
5284 sym_name = hash->root.root.root.string;
5289 /* Determine what (if any) linker stub is needed. */
5290 stub_type = arm_type_of_stub (info, section, irela,
5291 st_type, &branch_type,
5292 hash, destination, sym_sec,
5293 input_bfd, sym_name);
5294 if (stub_type == arm_stub_none)
5297 /* Support for grouping stub sections. */
5298 id_sec = htab->stub_group[section->id].link_sec;
5300 /* Get the name of this stub. */
5301 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
5304 goto error_ret_free_internal;
5306 /* We've either created a stub for this reloc already,
5307 or we are about to. */
5308 created_stub = TRUE;
5310 stub_entry = arm_stub_hash_lookup
5311 (&htab->stub_hash_table, stub_name,
5313 if (stub_entry != NULL)
5315 /* The proper stub has already been created. */
5317 stub_entry->target_value = sym_value;
5321 stub_entry = elf32_arm_add_stub (stub_name, section,
5323 if (stub_entry == NULL)
5326 goto error_ret_free_internal;
5329 stub_entry->target_value = sym_value;
5330 stub_entry->target_section = sym_sec;
5331 stub_entry->stub_type = stub_type;
5332 stub_entry->h = hash;
5333 stub_entry->branch_type = branch_type;
5335 if (sym_name == NULL)
5336 sym_name = "unnamed";
5337 stub_entry->output_name = (char *)
5338 bfd_alloc (htab->stub_bfd,
5339 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5340 + strlen (sym_name));
5341 if (stub_entry->output_name == NULL)
5344 goto error_ret_free_internal;
5347 /* For historical reasons, use the existing names for
5348 ARM-to-Thumb and Thumb-to-ARM stubs. */
5349 if ((r_type == (unsigned int) R_ARM_THM_CALL
5350 || r_type == (unsigned int) R_ARM_THM_JUMP24)
5351 && branch_type == ST_BRANCH_TO_ARM)
5352 sprintf (stub_entry->output_name,
5353 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5354 else if ((r_type == (unsigned int) R_ARM_CALL
5355 || r_type == (unsigned int) R_ARM_JUMP24)
5356 && branch_type == ST_BRANCH_TO_THUMB)
5357 sprintf (stub_entry->output_name,
5358 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5360 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
5363 stub_changed = TRUE;
5367 /* Look for relocations which might trigger the Cortex-A8 erratum.  */
5369 if (htab->fix_cortex_a8
5370 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5371 || r_type == (unsigned int) R_ARM_THM_JUMP19
5372 || r_type == (unsigned int) R_ARM_THM_CALL
5373 || r_type == (unsigned int) R_ARM_THM_XPC22))
5375 bfd_vma from = section->output_section->vma
5376 + section->output_offset
5379 if ((from & 0xfff) == 0xffe)
5381 /* Found a candidate. Note we haven't checked the
5382 destination is within 4K here: if we do so (and
5383 don't create an entry in a8_relocs) we can't tell
5384 that a branch should have been relocated when scanning later.  */
5386 if (num_a8_relocs == a8_reloc_table_size)
5388 a8_reloc_table_size *= 2;
5389 a8_relocs = (struct a8_erratum_reloc *)
5390 bfd_realloc (a8_relocs,
5391 sizeof (struct a8_erratum_reloc)
5392 * a8_reloc_table_size);
5395 a8_relocs[num_a8_relocs].from = from;
5396 a8_relocs[num_a8_relocs].destination = destination;
5397 a8_relocs[num_a8_relocs].r_type = r_type;
5398 a8_relocs[num_a8_relocs].branch_type = branch_type;
5399 a8_relocs[num_a8_relocs].sym_name = sym_name;
5400 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5401 a8_relocs[num_a8_relocs].hash = hash;
5408 /* We're done with the internal relocs, free them. */
5409 if (elf_section_data (section)->relocs == NULL)
5410 free (internal_relocs);
5413 if (htab->fix_cortex_a8)
5415 /* Sort relocs which might apply to Cortex-A8 erratum. */
5416 qsort (a8_relocs, num_a8_relocs,
5417 sizeof (struct a8_erratum_reloc),
5420 /* Scan for branches which might trigger Cortex-A8 erratum. */
5421 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5422 &num_a8_fixes, &a8_fix_table_size,
5423 a8_relocs, num_a8_relocs,
5424 prev_num_a8_fixes, &stub_changed)
5426 goto error_ret_free_local;
5430 if (prev_num_a8_fixes != num_a8_fixes)
5431 stub_changed = TRUE;
5436 /* OK, we've added some stubs.  Find out the new size of the stub sections.  */
5438 for (stub_sec = htab->stub_bfd->sections;
5440 stub_sec = stub_sec->next)
5442 /* Ignore non-stub sections. */
5443 if (!strstr (stub_sec->name, STUB_SUFFIX))
5449 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5451 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5452 if (htab->fix_cortex_a8)
5453 for (i = 0; i < num_a8_fixes; i++)
5455 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5456 a8_fixes[i].section, htab);
5458 if (stub_sec == NULL)
5459 goto error_ret_free_local;
5462 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5467 /* Ask the linker to do its stuff. */
5468 (*htab->layout_sections_again) ();
5471 /* Add stubs for Cortex-A8 erratum fixes now. */
5472 if (htab->fix_cortex_a8)
5474 for (i = 0; i < num_a8_fixes; i++)
5476 struct elf32_arm_stub_hash_entry *stub_entry;
5477 char *stub_name = a8_fixes[i].stub_name;
5478 asection *section = a8_fixes[i].section;
5479 unsigned int section_id = a8_fixes[i].section->id;
5480 asection *link_sec = htab->stub_group[section_id].link_sec;
5481 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5482 const insn_sequence *template_sequence;
5483 int template_size, size = 0;
5485 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5487 if (stub_entry == NULL)
5489 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5495 stub_entry->stub_sec = stub_sec;
5496 stub_entry->stub_offset = 0;
5497 stub_entry->id_sec = link_sec;
5498 stub_entry->stub_type = a8_fixes[i].stub_type;
5499 stub_entry->target_section = a8_fixes[i].section;
5500 stub_entry->target_value = a8_fixes[i].offset;
5501 stub_entry->target_addend = a8_fixes[i].addend;
5502 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5503 stub_entry->branch_type = a8_fixes[i].branch_type;
5505 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5509 stub_entry->stub_size = size;
5510 stub_entry->stub_template = template_sequence;
5511 stub_entry->stub_template_size = template_size;
5514 /* Stash the Cortex-A8 erratum fix array for use later in
5515 elf32_arm_write_section(). */
5516 htab->a8_erratum_fixes = a8_fixes;
5517 htab->num_a8_erratum_fixes = num_a8_fixes;
5521 htab->a8_erratum_fixes = NULL;
5522 htab->num_a8_erratum_fixes = 0;
5526 error_ret_free_local:
5530 /* Build all the stubs associated with the current output file. The
5531 stubs are kept in a hash table attached to the main linker hash
5532 table. We also set up the .plt entries for statically linked PIC
5533 functions here.  This function is called via arm_elf_finish in the linker.  */
5537 elf32_arm_build_stubs (struct bfd_link_info *info)
5540 struct bfd_hash_table *table;
5541 struct elf32_arm_link_hash_table *htab;
5543 htab = elf32_arm_hash_table (info);
5547 for (stub_sec = htab->stub_bfd->sections;
5549 stub_sec = stub_sec->next)
5553 /* Ignore non-stub sections. */
5554 if (!strstr (stub_sec->name, STUB_SUFFIX))
5557 /* Allocate memory to hold the linker stubs. */
5558 size = stub_sec->size;
5559 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5560 if (stub_sec->contents == NULL && size != 0)
5565 /* Build the stubs as directed by the stub hash table. */
5566 table = &htab->stub_hash_table;
5567 bfd_hash_traverse (table, arm_build_one_stub, info);
5568 if (htab->fix_cortex_a8)
5570 /* Place the cortex a8 stubs last. */
5571 htab->fix_cortex_a8 = -1;
5572 bfd_hash_traverse (table, arm_build_one_stub, info);
5578 /* Locate the Thumb encoded calling stub for NAME. */
5580 static struct elf_link_hash_entry *
5581 find_thumb_glue (struct bfd_link_info *link_info,
5583 char **error_message)
5586 struct elf_link_hash_entry *hash;
5587 struct elf32_arm_link_hash_table *hash_table;
5589 /* We need a pointer to the armelf specific hash table. */
5590 hash_table = elf32_arm_hash_table (link_info);
5591 if (hash_table == NULL)
5594 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5595 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5597 BFD_ASSERT (tmp_name);
5599 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5601 hash = elf_link_hash_lookup
5602 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5605 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5606 tmp_name, name) == -1)
5607 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5614 /* Locate the ARM encoded calling stub for NAME. */
5616 static struct elf_link_hash_entry *
5617 find_arm_glue (struct bfd_link_info *link_info,
5619 char **error_message)
5622 struct elf_link_hash_entry *myh;
5623 struct elf32_arm_link_hash_table *hash_table;
5625 /* We need a pointer to the elfarm specific hash table. */
5626 hash_table = elf32_arm_hash_table (link_info);
5627 if (hash_table == NULL)
5630 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5631 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5633 BFD_ASSERT (tmp_name);
5635 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5637 myh = elf_link_hash_lookup
5638 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5641 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5642 tmp_name, name) == -1)
5643 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
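/* Usage sketch (hypothetical example): for a call to a function named
   "foo", the two helpers above build the lookup name from the
   ARM2THUMB_GLUE_ENTRY_NAME / THUMB2ARM_GLUE_ENTRY_NAME templates,
   giving glue symbols along the lines of "__foo_from_arm" and
   "__foo_from_thumb", and return the matching hash entry, or NULL with
   *error_message filled in when no such glue has been recorded.  */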
5650 /* ARM->Thumb glue (static images):

   .arm
 __func_from_arm:
   ldr  r12, __func_addr
   bx   r12
 __func_addr:
   .word func   @ behave as if you saw an ARM_32 reloc.

   (v5t static images)
   .arm
 __func_from_arm:
   ldr  pc, __func_addr
 __func_addr:
   .word func   @ behave as if you saw an ARM_32 reloc.

   (relocatable images)
   .arm
 __func_from_arm:
   ldr  r12, __func_offset
   add  r12, r12, pc
   bx   r12
 __func_offset:
   .word func - .  */
5675 #define ARM2THUMB_STATIC_GLUE_SIZE 12
5676 static const insn32 a2t1_ldr_insn = 0xe59fc000;
5677 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
5678 static const insn32 a2t3_func_addr_insn = 0x00000001;
5680 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
5681 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5682 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5684 #define ARM2THUMB_PIC_GLUE_SIZE 16
5685 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5686 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5687 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
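/* Decoding of the constants above (informational): 0xe59fc000 is
   "ldr r12, [pc, #0]", 0xe12fff1c is "bx r12", 0xe51ff004 is
   "ldr pc, [pc, #-4]", 0xe59fc004 is "ldr r12, [pc, #4]" and 0xe08cc00f
   is "add r12, r12, pc", matching the glue sequences sketched in the
   comment above.  */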
5689 /* Thumb->ARM glue:

   .thumb
 __func_from_thumb:
   bx   pc
   nop
   .arm
   b    func

   For calling non-interworking aware ARM code a longer
   __func_from_thumb sequence is used which loads the target address
   into a register (ldr r6, __func_addr) before transferring control.  */
5705 #define THUMB2ARM_GLUE_SIZE 8
5706 static const insn16 t2a1_bx_pc_insn = 0x4778;
5707 static const insn16 t2a2_noop_insn = 0x46c0;
5708 static const insn32 t2a3_b_insn = 0xea000000;
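/* Likewise for the Thumb->ARM glue constants: 0x4778 is "bx pc", 0x46c0
   is the Thumb no-op "mov r8, r8", and 0xea000000 is an ARM "b" whose
   24-bit offset field is filled in by relocation.  */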
5710 #define VFP11_ERRATUM_VENEER_SIZE 8
5712 #define ARM_BX_VENEER_SIZE 12
5713 static const insn32 armbx1_tst_insn = 0xe3100001;
5714 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5715 static const insn32 armbx3_bx_insn = 0xe12fff10;
5717 #ifndef ELFARM_NABI_C_INCLUDED
5719 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5722 bfd_byte * contents;
5726 /* Do not include empty glue sections in the output. */
5729 s = bfd_get_linker_section (abfd, name);
5731 s->flags |= SEC_EXCLUDE;
5736 BFD_ASSERT (abfd != NULL);
5738 s = bfd_get_linker_section (abfd, name);
5739 BFD_ASSERT (s != NULL);
5741 contents = (bfd_byte *) bfd_alloc (abfd, size);
5743 BFD_ASSERT (s->size == size);
5744 s->contents = contents;
5748 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5750 struct elf32_arm_link_hash_table * globals;
5752 globals = elf32_arm_hash_table (info);
5753 BFD_ASSERT (globals != NULL);
5755 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5756 globals->arm_glue_size,
5757 ARM2THUMB_GLUE_SECTION_NAME);
5759 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5760 globals->thumb_glue_size,
5761 THUMB2ARM_GLUE_SECTION_NAME);
5763 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5764 globals->vfp11_erratum_glue_size,
5765 VFP11_ERRATUM_VENEER_SECTION_NAME);
5767 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5768 globals->bx_glue_size,
5769 ARM_BX_GLUE_SECTION_NAME);
5774 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5775 Returns the symbol identifying the stub.  */
5777 static struct elf_link_hash_entry *
5778 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5779 struct elf_link_hash_entry * h)
5781 const char * name = h->root.root.string;
5784 struct elf_link_hash_entry * myh;
5785 struct bfd_link_hash_entry * bh;
5786 struct elf32_arm_link_hash_table * globals;
5790 globals = elf32_arm_hash_table (link_info);
5791 BFD_ASSERT (globals != NULL);
5792 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5794 s = bfd_get_linker_section
5795 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5797 BFD_ASSERT (s != NULL);
5799 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5800 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5802 BFD_ASSERT (tmp_name);
5804 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5806 myh = elf_link_hash_lookup
5807 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5811 /* We've already seen this guy. */
5816 /* The only trick here is using hash_table->arm_glue_size as the value.
5817 Even though the section isn't allocated yet, this is where we will be
5818 putting it. The +1 on the value marks that the stub has not been
5819 output yet - not that it is a Thumb function. */
5821 val = globals->arm_glue_size + 1;
5822 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5823 tmp_name, BSF_GLOBAL, s, val,
5824 NULL, TRUE, FALSE, &bh);
5826 myh = (struct elf_link_hash_entry *) bh;
5827 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5828 myh->forced_local = 1;
5832 if (link_info->shared || globals->root.is_relocatable_executable
5833 || globals->pic_veneer)
5834 size = ARM2THUMB_PIC_GLUE_SIZE;
5835 else if (globals->use_blx)
5836 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5838 size = ARM2THUMB_STATIC_GLUE_SIZE;
5841 globals->arm_glue_size += size;
5846 /* Allocate space for ARMv4 BX veneers. */
5849 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5852 struct elf32_arm_link_hash_table *globals;
5854 struct elf_link_hash_entry *myh;
5855 struct bfd_link_hash_entry *bh;
5858 /* BX PC does not need a veneer. */
5862 globals = elf32_arm_hash_table (link_info);
5863 BFD_ASSERT (globals != NULL);
5864 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5866 /* Check if this veneer has already been allocated. */
5867 if (globals->bx_glue_offset[reg])
5870 s = bfd_get_linker_section
5871 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5873 BFD_ASSERT (s != NULL);
5875 /* Add symbol for veneer. */
5877 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5879 BFD_ASSERT (tmp_name);
5881 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5883 myh = elf_link_hash_lookup
5884 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5886 BFD_ASSERT (myh == NULL);
5889 val = globals->bx_glue_size;
5890 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5891 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5892 NULL, TRUE, FALSE, &bh);
5894 myh = (struct elf_link_hash_entry *) bh;
5895 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5896 myh->forced_local = 1;
5898 s->size += ARM_BX_VENEER_SIZE;
5899 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5900 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5904 /* Add an entry to the code/data map for section SEC. */
5907 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5909 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5910 unsigned int newidx;
5912 if (sec_data->map == NULL)
5914 sec_data->map = (elf32_arm_section_map *)
5915 bfd_malloc (sizeof (elf32_arm_section_map));
5916 sec_data->mapcount = 0;
5917 sec_data->mapsize = 1;
5920 newidx = sec_data->mapcount++;
5922 if (sec_data->mapcount > sec_data->mapsize)
5924 sec_data->mapsize *= 2;
5925 sec_data->map = (elf32_arm_section_map *)
5926 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5927 * sizeof (elf32_arm_section_map));
5932 sec_data->map[newidx].vma = vma;
5933 sec_data->map[newidx].type = type;
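/* Example (informational): elf32_arm_section_map_add (sec, 'a', 0)
   records that ARM code starts at offset 0 of SEC; 't' and 'd' entries
   mark Thumb code and literal data in the same way, mirroring the AAELF
   mapping symbols $a, $t and $d.  */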
5938 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5939 veneers are handled for now. */
5942 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5943 elf32_vfp11_erratum_list *branch,
5945 asection *branch_sec,
5946 unsigned int offset)
5949 struct elf32_arm_link_hash_table *hash_table;
5951 struct elf_link_hash_entry *myh;
5952 struct bfd_link_hash_entry *bh;
5954 struct _arm_elf_section_data *sec_data;
5955 elf32_vfp11_erratum_list *newerr;
5957 hash_table = elf32_arm_hash_table (link_info);
5958 BFD_ASSERT (hash_table != NULL);
5959 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5961 s = bfd_get_linker_section
5962 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5964 sec_data = elf32_arm_section_data (s);
5966 BFD_ASSERT (s != NULL);
5968 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5969 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5971 BFD_ASSERT (tmp_name);
5973 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5974 hash_table->num_vfp11_fixes);
5976 myh = elf_link_hash_lookup
5977 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5979 BFD_ASSERT (myh == NULL);
5982 val = hash_table->vfp11_erratum_glue_size;
5983 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5984 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5985 NULL, TRUE, FALSE, &bh);
5987 myh = (struct elf_link_hash_entry *) bh;
5988 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5989 myh->forced_local = 1;
5991 /* Link veneer back to calling location. */
5992 sec_data->erratumcount += 1;
5993 newerr = (elf32_vfp11_erratum_list *)
5994 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5996 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5998 newerr->u.v.branch = branch;
5999 newerr->u.v.id = hash_table->num_vfp11_fixes;
6000 branch->u.b.veneer = newerr;
6002 newerr->next = sec_data->erratumlist;
6003 sec_data->erratumlist = newerr;
6005 /* A symbol for the return from the veneer. */
6006 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6007 hash_table->num_vfp11_fixes);
6009 myh = elf_link_hash_lookup
6010 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6017 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6018 branch_sec, val, NULL, TRUE, FALSE, &bh);
6020 myh = (struct elf_link_hash_entry *) bh;
6021 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6022 myh->forced_local = 1;
6026 /* Generate a mapping symbol for the veneer section, and explicitly add an
6027 entry for that symbol to the code/data map for the section. */
6028 if (hash_table->vfp11_erratum_glue_size == 0)
6031 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6032 ever requires this erratum fix. */
6033 _bfd_generic_link_add_one_symbol (link_info,
6034 hash_table->bfd_of_glue_owner, "$a",
6035 BSF_LOCAL, s, 0, NULL,
6038 myh = (struct elf_link_hash_entry *) bh;
6039 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6040 myh->forced_local = 1;
6042 /* The elf32_arm_init_maps function only cares about symbols from input
6043 BFDs. We must make a note of this generated mapping symbol
6044 ourselves so that code byteswapping works properly in
6045 elf32_arm_write_section. */
6046 elf32_arm_section_map_add (s, 'a', 0);
6049 s->size += VFP11_ERRATUM_VENEER_SIZE;
6050 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6051 hash_table->num_vfp11_fixes++;
6053 /* The offset of the veneer. */
6057 #define ARM_GLUE_SECTION_FLAGS \
6058 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6059 | SEC_READONLY | SEC_LINKER_CREATED)
6061 /* Create a fake section for use by the ARM backend of the linker. */
6064 arm_make_glue_section (bfd * abfd, const char * name)
6068 sec = bfd_get_linker_section (abfd, name);
6073 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6076 || !bfd_set_section_alignment (abfd, sec, 2))
6079 /* Set the gc mark to prevent the section from being removed by garbage
6080 collection, despite the fact that no relocs refer to this section. */
6086 /* Set size of .plt entries. This function is called from the
6087 linker scripts in ld/emultempl/{armelf}.em. */
6090 bfd_elf32_arm_use_long_plt (void)
6092 elf32_arm_use_long_plt_entry = TRUE;
6095 /* Add the glue sections to ABFD. This function is called from the
6096 linker scripts in ld/emultempl/{armelf}.em. */
6099 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6100 struct bfd_link_info *info)
6102 /* If we are only performing a partial
6103 link do not bother adding the glue. */
6104 if (info->relocatable)
6107 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6108 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6109 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6110 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6113 /* Select a BFD to be used to hold the sections used by the glue code.
6114 This function is called from the linker scripts in ld/emultempl/{armelf}.em.  */
6118 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6120 struct elf32_arm_link_hash_table *globals;
6122 /* If we are only performing a partial link
6123 do not bother getting a bfd to hold the glue. */
6124 if (info->relocatable)
6127 /* Make sure we don't attach the glue sections to a dynamic object. */
6128 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6130 globals = elf32_arm_hash_table (info);
6131 BFD_ASSERT (globals != NULL);
6133 if (globals->bfd_of_glue_owner != NULL)
6136 /* Save the bfd for later use. */
6137 globals->bfd_of_glue_owner = abfd;
6143 check_use_blx (struct elf32_arm_link_hash_table *globals)
6147 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6150 if (globals->fix_arm1176)
6152 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6153 globals->use_blx = 1;
6157 if (cpu_arch > TAG_CPU_ARCH_V4T)
6158 globals->use_blx = 1;
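/* Example of the policy above (informational): a Tag_CPU_arch value of
   TAG_CPU_ARCH_V5T or later normally enables BLX, which first appeared
   in ARMv5T.  With the ARM1176 workaround enabled, only v6T2 and
   architectures newer than v6K qualify, so plain v6 and v6K links keep
   using non-BLX call sequences.  */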
6163 bfd_elf32_arm_process_before_allocation (bfd *abfd,
6164 struct bfd_link_info *link_info)
6166 Elf_Internal_Shdr *symtab_hdr;
6167 Elf_Internal_Rela *internal_relocs = NULL;
6168 Elf_Internal_Rela *irel, *irelend;
6169 bfd_byte *contents = NULL;
6172 struct elf32_arm_link_hash_table *globals;
6174 /* If we are only performing a partial link do not bother
6175 to construct any glue. */
6176 if (link_info->relocatable)
6179 /* Here we have a bfd that is to be included on the link. We have a
6180 hook to do reloc rummaging, before section sizes are nailed down. */
6181 globals = elf32_arm_hash_table (link_info);
6182 BFD_ASSERT (globals != NULL);
6184 check_use_blx (globals);
6186 if (globals->byteswap_code && !bfd_big_endian (abfd))
6188 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6193 /* PR 5398: If we have not decided to include any loadable sections in
6194 the output then we will not have a glue owner bfd. This is OK, it
6195 just means that there is nothing else for us to do here. */
6196 if (globals->bfd_of_glue_owner == NULL)
6199 /* Rummage around all the relocs and map the glue vectors. */
6200 sec = abfd->sections;
6205 for (; sec != NULL; sec = sec->next)
6207 if (sec->reloc_count == 0)
6210 if ((sec->flags & SEC_EXCLUDE) != 0)
6213 symtab_hdr = & elf_symtab_hdr (abfd);
6215 /* Load the relocs. */
6217 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
6219 if (internal_relocs == NULL)
6222 irelend = internal_relocs + sec->reloc_count;
6223 for (irel = internal_relocs; irel < irelend; irel++)
6226 unsigned long r_index;
6228 struct elf_link_hash_entry *h;
6230 r_type = ELF32_R_TYPE (irel->r_info);
6231 r_index = ELF32_R_SYM (irel->r_info);
6233 /* These are the only relocation types we care about. */
6234 if ( r_type != R_ARM_PC24
6235 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6238 /* Get the section contents if we haven't done so already. */
6239 if (contents == NULL)
6241 /* Get cached copy if it exists. */
6242 if (elf_section_data (sec)->this_hdr.contents != NULL)
6243 contents = elf_section_data (sec)->this_hdr.contents;
6246 /* Go get them off disk. */
6247 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6252 if (r_type == R_ARM_V4BX)
6256 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6257 record_arm_bx_glue (link_info, reg);
6261 /* If the relocation is not against a symbol it cannot concern us. */
6264 /* We don't care about local symbols. */
6265 if (r_index < symtab_hdr->sh_info)
6268 /* This is an external symbol. */
6269 r_index -= symtab_hdr->sh_info;
6270 h = (struct elf_link_hash_entry *)
6271 elf_sym_hashes (abfd)[r_index];
6273 /* If the relocation is against a static symbol it must be within
6274 the current section and so cannot be a cross ARM/Thumb relocation. */
6278 /* If the call will go through a PLT entry then we do not need glue.  */
6280 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6286 /* This one is a call from arm code. We need to look up
6287 the target of the call.  If it is a thumb target, we insert glue.  */
6289 if (h->target_internal == ST_BRANCH_TO_THUMB)
6290 record_arm_to_thumb_glue (link_info, h);
6298 if (contents != NULL
6299 && elf_section_data (sec)->this_hdr.contents != contents)
6303 if (internal_relocs != NULL
6304 && elf_section_data (sec)->relocs != internal_relocs)
6305 free (internal_relocs);
6306 internal_relocs = NULL;
6312 if (contents != NULL
6313 && elf_section_data (sec)->this_hdr.contents != contents)
6315 if (internal_relocs != NULL
6316 && elf_section_data (sec)->relocs != internal_relocs)
6317 free (internal_relocs);
6324 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6327 bfd_elf32_arm_init_maps (bfd *abfd)
6329 Elf_Internal_Sym *isymbuf;
6330 Elf_Internal_Shdr *hdr;
6331 unsigned int i, localsyms;
6333 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6334 if (! is_arm_elf (abfd))
6337 if ((abfd->flags & DYNAMIC) != 0)
6340 hdr = & elf_symtab_hdr (abfd);
6341 localsyms = hdr->sh_info;
6343 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6344 should contain the number of local symbols, which should come before any
6345 global symbols. Mapping symbols are always local. */
6346 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6349 /* No internal symbols read? Skip this BFD. */
6350 if (isymbuf == NULL)
6353 for (i = 0; i < localsyms; i++)
6355 Elf_Internal_Sym *isym = &isymbuf[i];
6356 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6360 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6362 name = bfd_elf_string_from_elf_section (abfd,
6363 hdr->sh_link, isym->st_name);
6365 if (bfd_is_arm_special_symbol_name (name,
6366 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6367 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6373 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6374 say what they wanted. */
6377 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6379 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6380 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6382 if (globals == NULL)
6385 if (globals->fix_cortex_a8 == -1)
6387 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6388 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6389 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6390 || out_attr[Tag_CPU_arch_profile].i == 0))
6391 globals->fix_cortex_a8 = 1;
6393 globals->fix_cortex_a8 = 0;
6399 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6401 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6402 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6404 if (globals == NULL)
6406 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6407 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6409 switch (globals->vfp11_fix)
6411 case BFD_ARM_VFP11_FIX_DEFAULT:
6412 case BFD_ARM_VFP11_FIX_NONE:
6413 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6417 /* Give a warning, but do as the user requests anyway. */
6418 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6419 "workaround is not necessary for target architecture"), obfd);
6422 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6423 /* For earlier architectures, we might need the workaround, but do not
6424 enable it by default. If the user is running with broken hardware, they
6425 must enable the erratum fix explicitly. */
6426 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6430 enum bfd_arm_vfp11_pipe
6438 /* Return a VFP register number. This is encoded as RX:X for single-precision
6439 registers, or X:RX for double-precision registers, where RX is the group of
6440 four bits in the instruction encoding and X is the single extension bit.
6441 RX and X fields are specified using their lowest (starting) bit. The return
6444 0...31: single-precision registers s0...s31
6445 32...63: double-precision registers d0...d31.
6447 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6448 encounter VFP3 instructions, so we allow the full range for DP registers. */
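/* Worked example (illustrative, not part of the original source): for an
   instruction whose RX field holds 0b0101 and whose X bit is 1, the
   single-precision case yields (0b0101 << 1) | 1 = 11, i.e. s11, while the
   double-precision case yields (0b0101 | (1 << 4)) + 32 = 53, i.e. d21.  */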
6451 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6455 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6457 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6460 /* Set bits in *WMASK according to a register number REG as encoded by
6461 bfd_arm_vfp11_regno(). Ignore d16-d31. */
6464 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
6469 *wmask |= 3 << ((reg - 32) * 2);
6472 /* Return TRUE if WMASK overwrites anything in REGS. */
6475 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6479 for (i = 0; i < numregs; i++)
6481 unsigned int reg = regs[i];
6483 if (reg < 32 && (wmask & (1 << reg)) != 0)
6491 if ((wmask & (3 << (reg * 2))) != 0)
6498 /* In this function, we're interested in two things: finding input registers
6499 for VFP data-processing instructions, and finding the set of registers which
6500 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6501 hold the written set, so FLDM etc. are easy to deal with (we're only
6502 interested in 32 SP registers or 16 DP registers, due to the VFP version
6503 implemented by the chip in question). DP registers are marked by setting
6504 both SP registers in the write mask. */
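/* For example (illustrative note): s7 corresponds to bit 7 of the mask,
   while d5 (register number 37) sets bits 10 and 11, the bits of its two
   constituent single-precision halves s10 and s11.  */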
6506 static enum bfd_arm_vfp11_pipe
6507 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
6510 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
6511 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
6513 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
6516 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6517 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6519 pqrs = ((insn & 0x00800000) >> 20)
6520 | ((insn & 0x00300000) >> 19)
6521 | ((insn & 0x00000040) >> 6);
6525 case 0: /* fmac[sd]. */
6526 case 1: /* fnmac[sd]. */
6527 case 2: /* fmsc[sd]. */
6528 case 3: /* fnmsc[sd]. */
6530 bfd_arm_vfp11_write_mask (destmask, fd);
6532 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6537 case 4: /* fmul[sd]. */
6538 case 5: /* fnmul[sd]. */
6539 case 6: /* fadd[sd]. */
6540 case 7: /* fsub[sd]. */
6544 case 8: /* fdiv[sd]. */
6547 bfd_arm_vfp11_write_mask (destmask, fd);
6548 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6553 case 15: /* extended opcode. */
6555 unsigned int extn = ((insn >> 15) & 0x1e)
6556 | ((insn >> 7) & 1);
6560 case 0: /* fcpy[sd]. */
6561 case 1: /* fabs[sd]. */
6562 case 2: /* fneg[sd]. */
6563 case 8: /* fcmp[sd]. */
6564 case 9: /* fcmpe[sd]. */
6565 case 10: /* fcmpz[sd]. */
6566 case 11: /* fcmpez[sd]. */
6567 case 16: /* fuito[sd]. */
6568 case 17: /* fsito[sd]. */
6569 case 24: /* ftoui[sd]. */
6570 case 25: /* ftouiz[sd]. */
6571 case 26: /* ftosi[sd]. */
6572 case 27: /* ftosiz[sd]. */
6573 /* These instructions will not bounce due to underflow. */
6578 case 3: /* fsqrt[sd]. */
6579 /* fsqrt cannot underflow, but it can (perhaps) overwrite
6580 registers to cause the erratum in previous instructions. */
6581 bfd_arm_vfp11_write_mask (destmask, fd);
6585 case 15: /* fcvt{ds,sd}. */
6589 bfd_arm_vfp11_write_mask (destmask, fd);
6591 /* Only FCVTSD can underflow. */
6592 if ((insn & 0x100) != 0)
6611 /* Two-register transfer. */
6612 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
6614 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6616 if ((insn & 0x100000) == 0)
6619 bfd_arm_vfp11_write_mask (destmask, fm);
6622 bfd_arm_vfp11_write_mask (destmask, fm);
6623 bfd_arm_vfp11_write_mask (destmask, fm + 1);
6629 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
6631 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6632 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
6636 case 0: /* Two-reg transfer. We should catch these above. */
6639 case 2: /* fldm[sdx]. */
6643 unsigned int i, offset = insn & 0xff;
6648 for (i = fd; i < fd + offset; i++)
6649 bfd_arm_vfp11_write_mask (destmask, i);
6653 case 4: /* fld[sd]. */
6655 bfd_arm_vfp11_write_mask (destmask, fd);
6664 /* Single-register transfer. Note L==0. */
6665 else if ((insn & 0x0f100e10) == 0x0e000a10)
6667 unsigned int opcode = (insn >> 21) & 7;
6668 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
6672 case 0: /* fmsr/fmdlr. */
6673 case 1: /* fmdhr. */
6674 /* Mark fmdhr and fmdlr as writing to the whole of the DP
6675 destination register. I don't know if this is exactly right,
6676 but it is the conservative choice. */
6677 bfd_arm_vfp11_write_mask (destmask, fn);
6691 static int elf32_arm_compare_mapping (const void * a, const void * b);
6694 /* Look for potentially-troublesome code sequences which might trigger the
6695 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6696 (available from ARM) for details of the erratum. A short version is
6697 described in ld.texinfo. */
6700 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6703 bfd_byte *contents = NULL;
6705 int regs[3], numregs = 0;
6706 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6707 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6709 if (globals == NULL)
6712 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6713 The states transition as follows:
6715 0 -> 1 (vector) or 0 -> 2 (scalar)
6716 A VFP FMAC-pipeline instruction has been seen. Fill
6717 regs[0]..regs[numregs-1] with its input operands. Remember this
6718 instruction in 'first_fmac'.
6721 Any instruction, except for a VFP instruction which overwrites
6726 A VFP instruction has been seen which overwrites any of regs[*].
6727 We must make a veneer! Reset state to 0 before examining next
6731 If we fail to match anything in state 2, reset to state 0 and reset
6732 the instruction pointer to the instruction after 'first_fmac'.
6734 If the VFP11 vector mode is in use, there must be at least two unrelated
6735 instructions between anti-dependent VFP11 instructions to properly avoid
6736 triggering the erratum, hence the use of the extra state 1. */
6738 /* If we are only performing a partial link do not bother
6739 to construct any glue. */
6740 if (link_info->relocatable)
6743 /* Skip if this bfd does not correspond to an ELF image. */
6744 if (! is_arm_elf (abfd))
6747 /* We should have chosen a fix type by the time we get here. */
6748 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6750 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6753 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6754 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6757 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6759 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6760 struct _arm_elf_section_data *sec_data;
6762 /* If we don't have executable progbits, we're not interested in this
6763 section. Also skip if section is to be excluded. */
6764 if (elf_section_type (sec) != SHT_PROGBITS
6765 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6766 || (sec->flags & SEC_EXCLUDE) != 0
6767 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
6768 || sec->output_section == bfd_abs_section_ptr
6769 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6772 sec_data = elf32_arm_section_data (sec);
6774 if (sec_data->mapcount == 0)
6777 if (elf_section_data (sec)->this_hdr.contents != NULL)
6778 contents = elf_section_data (sec)->this_hdr.contents;
6779 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6782 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6783 elf32_arm_compare_mapping);
6785 for (span = 0; span < sec_data->mapcount; span++)
6787 unsigned int span_start = sec_data->map[span].vma;
6788 unsigned int span_end = (span == sec_data->mapcount - 1)
6789 ? sec->size : sec_data->map[span + 1].vma;
6790 char span_type = sec_data->map[span].type;
6792 /* FIXME: Only ARM mode is supported at present. We may need to
6793 support Thumb-2 mode also at some point. */
6794 if (span_type != 'a')
6797 for (i = span_start; i < span_end;)
6799 unsigned int next_i = i + 4;
6800 unsigned int insn = bfd_big_endian (abfd)
6801 ? (contents[i] << 24)
6802 | (contents[i + 1] << 16)
6803 | (contents[i + 2] << 8)
6805 : (contents[i + 3] << 24)
6806 | (contents[i + 2] << 16)
6807 | (contents[i + 1] << 8)
6809 unsigned int writemask = 0;
6810 enum bfd_arm_vfp11_pipe vpipe;
6815 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6817 /* I'm assuming the VFP11 erratum can trigger with denorm
6818 operands on either the FMAC or the DS pipeline. This might
6819 lead to slightly overenthusiastic veneer insertion. */
6820 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6822 state = use_vector ? 1 : 2;
6824 veneer_of_insn = insn;
6830 int other_regs[3], other_numregs;
6831 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6834 if (vpipe != VFP11_BAD
6835 && bfd_arm_vfp11_antidependency (writemask, regs,
6845 int other_regs[3], other_numregs;
6846 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6849 if (vpipe != VFP11_BAD
6850 && bfd_arm_vfp11_antidependency (writemask, regs,
6856 next_i = first_fmac + 4;
6862 abort (); /* Should be unreachable. */
6867 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6868 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6870 elf32_arm_section_data (sec)->erratumcount += 1;
6872 newerr->u.b.vfp_insn = veneer_of_insn;
6877 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6884 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6889 newerr->next = sec_data->erratumlist;
6890 sec_data->erratumlist = newerr;
6899 if (contents != NULL
6900 && elf_section_data (sec)->this_hdr.contents != contents)
6908 if (contents != NULL
6909 && elf_section_data (sec)->this_hdr.contents != contents)
6915 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6916 after sections have been laid out, using specially-named symbols. */
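/* Two kinds of specially-named symbols are resolved below: the veneer entry
   symbol itself, whose VMA is recorded in branch-type erratum nodes, and a
   companion symbol carrying an "_r" suffix that marks the return location,
   whose VMA is recorded in veneer-type nodes.  */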
6919 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6920 struct bfd_link_info *link_info)
6923 struct elf32_arm_link_hash_table *globals;
6926 if (link_info->relocatable)
6929 /* Skip if this bfd does not correspond to an ELF image. */
6930 if (! is_arm_elf (abfd))
6933 globals = elf32_arm_hash_table (link_info);
6934 if (globals == NULL)
6937 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6938 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6940 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6942 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6943 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6945 for (; errnode != NULL; errnode = errnode->next)
6947 struct elf_link_hash_entry *myh;
6950 switch (errnode->type)
6952 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6953 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6954 /* Find veneer symbol. */
6955 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6956 errnode->u.b.veneer->u.v.id);
6958 myh = elf_link_hash_lookup
6959 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6962 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6963 "`%s'"), abfd, tmp_name);
6965 vma = myh->root.u.def.section->output_section->vma
6966 + myh->root.u.def.section->output_offset
6967 + myh->root.u.def.value;
6969 errnode->u.b.veneer->vma = vma;
6972 case VFP11_ERRATUM_ARM_VENEER:
6973 case VFP11_ERRATUM_THUMB_VENEER:
6974 /* Find return location. */
6975 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6978 myh = elf_link_hash_lookup
6979 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6982 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6983 "`%s'"), abfd, tmp_name);
6985 vma = myh->root.u.def.section->output_section->vma
6986 + myh->root.u.def.section->output_offset
6987 + myh->root.u.def.value;
6989 errnode->u.v.branch->vma = vma;
7002 /* Set target relocation values needed during linking. */
7005 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
7006 struct bfd_link_info *link_info,
7008 char * target2_type,
7011 bfd_arm_vfp11_fix vfp11_fix,
7012 int no_enum_warn, int no_wchar_warn,
7013 int pic_veneer, int fix_cortex_a8,
7016 struct elf32_arm_link_hash_table *globals;
7018 globals = elf32_arm_hash_table (link_info);
7019 if (globals == NULL)
7022 globals->target1_is_rel = target1_is_rel;
7023 if (strcmp (target2_type, "rel") == 0)
7024 globals->target2_reloc = R_ARM_REL32;
7025 else if (strcmp (target2_type, "abs") == 0)
7026 globals->target2_reloc = R_ARM_ABS32;
7027 else if (strcmp (target2_type, "got-rel") == 0)
7028 globals->target2_reloc = R_ARM_GOT_PREL;
7031 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7034 globals->fix_v4bx = fix_v4bx;
7035 globals->use_blx |= use_blx;
7036 globals->vfp11_fix = vfp11_fix;
7037 globals->pic_veneer = pic_veneer;
7038 globals->fix_cortex_a8 = fix_cortex_a8;
7039 globals->fix_arm1176 = fix_arm1176;
7041 BFD_ASSERT (is_arm_elf (output_bfd));
7042 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
7043 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
7046 /* Replace the target offset of a Thumb bl or b.w instruction. */
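/* Descriptive note on the encoding manipulated below: the Thumb-2 BL/B.W
   offset is assembled from S:I1:I2:imm10:imm11:'0'.  The first halfword
   holds S (bit 10) and imm10 (offset bits 21..12); the second halfword holds
   J1 (bit 13), J2 (bit 11) and imm11 (offset bits 11..1), where
   J1 = NOT(I1) XOR S and J2 = NOT(I2) XOR S.  */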
7049 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
7055 BFD_ASSERT ((offset & 1) == 0);
7057 upper = bfd_get_16 (abfd, insn);
7058 lower = bfd_get_16 (abfd, insn + 2);
7059 reloc_sign = (offset < 0) ? 1 : 0;
7060 upper = (upper & ~(bfd_vma) 0x7ff)
7061 | ((offset >> 12) & 0x3ff)
7062 | (reloc_sign << 10);
7063 lower = (lower & ~(bfd_vma) 0x2fff)
7064 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
7065 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
7066 | ((offset >> 1) & 0x7ff);
7067 bfd_put_16 (abfd, upper, insn);
7068 bfd_put_16 (abfd, lower, insn + 2);
7071 /* Thumb code calling an ARM function. */
7074 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
7078 asection * input_section,
7079 bfd_byte * hit_data,
7082 bfd_signed_vma addend,
7084 char **error_message)
7088 long int ret_offset;
7089 struct elf_link_hash_entry * myh;
7090 struct elf32_arm_link_hash_table * globals;
7092 myh = find_thumb_glue (info, name, error_message);
7096 globals = elf32_arm_hash_table (info);
7097 BFD_ASSERT (globals != NULL);
7098 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7100 my_offset = myh->root.u.def.value;
7102 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7103 THUMB2ARM_GLUE_SECTION_NAME);
7105 BFD_ASSERT (s != NULL);
7106 BFD_ASSERT (s->contents != NULL);
7107 BFD_ASSERT (s->output_section != NULL);
7109 if ((my_offset & 0x01) == 0x01)
7112 && sym_sec->owner != NULL
7113 && !INTERWORK_FLAG (sym_sec->owner))
7115 (*_bfd_error_handler)
7116 (_("%B(%s): warning: interworking not enabled.\n"
7117 " first occurrence: %B: Thumb call to ARM"),
7118 sym_sec->owner, input_bfd, name);
7124 myh->root.u.def.value = my_offset;
7126 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
7127 s->contents + my_offset);
7129 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
7130 s->contents + my_offset + 2);
7133 /* Address of destination of the stub. */
7134 ((bfd_signed_vma) val)
7136 /* Offset from the start of the current section
7137 to the start of the stubs. */
7139 /* Offset of the start of this stub from the start of the stubs. */
7141 /* Address of the start of the current section. */
7142 + s->output_section->vma)
7143 /* The branch instruction is 4 bytes into the stub. */
7145 /* ARM branches work from the pc of the instruction + 8. */
7148 put_arm_insn (globals, output_bfd,
7149 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
7150 s->contents + my_offset + 4);
7153 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
7155 /* Now go back and fix up the original BL insn to point to here. */
7157 /* Address of where the stub is located. */
7158 (s->output_section->vma + s->output_offset + my_offset)
7159 /* Address of where the BL is located. */
7160 - (input_section->output_section->vma + input_section->output_offset
7162 /* Addend in the relocation. */
7164 /* Biassing for PC-relative addressing. */
7167 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
7172 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7174 static struct elf_link_hash_entry *
7175 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
7182 char ** error_message)
7185 long int ret_offset;
7186 struct elf_link_hash_entry * myh;
7187 struct elf32_arm_link_hash_table * globals;
7189 myh = find_arm_glue (info, name, error_message);
7193 globals = elf32_arm_hash_table (info);
7194 BFD_ASSERT (globals != NULL);
7195 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7197 my_offset = myh->root.u.def.value;
7199 if ((my_offset & 0x01) == 0x01)
7202 && sym_sec->owner != NULL
7203 && !INTERWORK_FLAG (sym_sec->owner))
7205 (*_bfd_error_handler)
7206 (_("%B(%s): warning: interworking not enabled.\n"
7207 " first occurrence: %B: arm call to thumb"),
7208 sym_sec->owner, input_bfd, name);
7212 myh->root.u.def.value = my_offset;
7214 if (info->shared || globals->root.is_relocatable_executable
7215 || globals->pic_veneer)
7217 /* For relocatable objects we can't use absolute addresses,
7218 so construct the address from a relative offset. */
7219 /* TODO: If the offset is small it's probably worth
7220 constructing the address with adds. */
7221 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
7222 s->contents + my_offset);
7223 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
7224 s->contents + my_offset + 4);
7225 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
7226 s->contents + my_offset + 8);
7227 /* Adjust the offset by 4 for the position of the add,
7228 and 8 for the pipeline offset. */
7229 ret_offset = (val - (s->output_offset
7230 + s->output_section->vma
7233 bfd_put_32 (output_bfd, ret_offset,
7234 s->contents + my_offset + 12);
7236 else if (globals->use_blx)
7238 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
7239 s->contents + my_offset);
7241 /* It's a thumb address. Add the low order bit. */
7242 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
7243 s->contents + my_offset + 4);
7247 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
7248 s->contents + my_offset);
7250 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
7251 s->contents + my_offset + 4);
7253 /* It's a thumb address. Add the low order bit. */
7254 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
7255 s->contents + my_offset + 8);
7261 BFD_ASSERT (my_offset <= globals->arm_glue_size);
7266 /* Arm code calling a Thumb function. */
7269 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
7273 asection * input_section,
7274 bfd_byte * hit_data,
7277 bfd_signed_vma addend,
7279 char **error_message)
7281 unsigned long int tmp;
7284 long int ret_offset;
7285 struct elf_link_hash_entry * myh;
7286 struct elf32_arm_link_hash_table * globals;
7288 globals = elf32_arm_hash_table (info);
7289 BFD_ASSERT (globals != NULL);
7290 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7292 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7293 ARM2THUMB_GLUE_SECTION_NAME);
7294 BFD_ASSERT (s != NULL);
7295 BFD_ASSERT (s->contents != NULL);
7296 BFD_ASSERT (s->output_section != NULL);
7298 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
7299 sym_sec, val, s, error_message);
7303 my_offset = myh->root.u.def.value;
7304 tmp = bfd_get_32 (input_bfd, hit_data);
7305 tmp = tmp & 0xFF000000;
7307 /* The branch offset is relative to pc, which reads as this instruction's address plus 8, so subtract 8. */
7308 ret_offset = (s->output_offset
7310 + s->output_section->vma
7311 - (input_section->output_offset
7312 + input_section->output_section->vma
7316 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
7318 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
7323 /* Populate Arm stub for an exported Thumb function. */
7326 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
7328 struct bfd_link_info * info = (struct bfd_link_info *) inf;
7330 struct elf_link_hash_entry * myh;
7331 struct elf32_arm_link_hash_entry *eh;
7332 struct elf32_arm_link_hash_table * globals;
7335 char *error_message;
7337 eh = elf32_arm_hash_entry (h);
7338 /* Allocate stubs for exported Thumb functions on v4t. */
7339 if (eh->export_glue == NULL)
7342 globals = elf32_arm_hash_table (info);
7343 BFD_ASSERT (globals != NULL);
7344 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7346 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7347 ARM2THUMB_GLUE_SECTION_NAME);
7348 BFD_ASSERT (s != NULL);
7349 BFD_ASSERT (s->contents != NULL);
7350 BFD_ASSERT (s->output_section != NULL);
7352 sec = eh->export_glue->root.u.def.section;
7354 BFD_ASSERT (sec->output_section != NULL);
7356 val = eh->export_glue->root.u.def.value + sec->output_offset
7357 + sec->output_section->vma;
7359 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
7360 h->root.u.def.section->owner,
7361 globals->obfd, sec, val, s,
7367 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
7370 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
7375 struct elf32_arm_link_hash_table *globals;
7377 globals = elf32_arm_hash_table (info);
7378 BFD_ASSERT (globals != NULL);
7379 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7381 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7382 ARM_BX_GLUE_SECTION_NAME);
7383 BFD_ASSERT (s != NULL);
7384 BFD_ASSERT (s->contents != NULL);
7385 BFD_ASSERT (s->output_section != NULL);
7387 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
7389 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
7391 if ((globals->bx_glue_offset[reg] & 1) == 0)
7393 p = s->contents + glue_addr;
7394 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
7395 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
7396 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
7397 globals->bx_glue_offset[reg] |= 1;
7400 return glue_addr + s->output_section->vma + s->output_offset;
7403 /* Generate Arm stubs for exported Thumb symbols. */
7405 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
7406 struct bfd_link_info *link_info)
7408 struct elf32_arm_link_hash_table * globals;
7410 if (link_info == NULL)
7411 /* Ignore this if we are not called by the ELF backend linker. */
7414 globals = elf32_arm_hash_table (link_info);
7415 if (globals == NULL)
7418 /* If blx is available then exported Thumb symbols are OK and there is
7420 if (globals->use_blx)
7423 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
7427 /* Reserve space for COUNT dynamic relocations in relocation section SRELOC. */
7431 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
7432 bfd_size_type count)
7434 struct elf32_arm_link_hash_table *htab;
7436 htab = elf32_arm_hash_table (info);
7437 BFD_ASSERT (htab->root.dynamic_sections_created);
7440 sreloc->size += RELOC_SIZE (htab) * count;
7443 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
7444 dynamic, the relocations should go in SRELOC, otherwise they should
7445 go in the special .rel.iplt section. */
7448 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
7449 bfd_size_type count)
7451 struct elf32_arm_link_hash_table *htab;
7453 htab = elf32_arm_hash_table (info);
7454 if (!htab->root.dynamic_sections_created)
7455 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
7458 BFD_ASSERT (sreloc != NULL);
7459 sreloc->size += RELOC_SIZE (htab) * count;
7463 /* Add relocation REL to the end of relocation section SRELOC. */
7466 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
7467 asection *sreloc, Elf_Internal_Rela *rel)
7470 struct elf32_arm_link_hash_table *htab;
7472 htab = elf32_arm_hash_table (info);
7473 if (!htab->root.dynamic_sections_created
7474 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
7475 sreloc = htab->root.irelplt;
7478 loc = sreloc->contents;
7479 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
7480 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
7482 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
7485 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
7486 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
7490 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
7491 bfd_boolean is_iplt_entry,
7492 union gotplt_union *root_plt,
7493 struct arm_plt_info *arm_plt)
7495 struct elf32_arm_link_hash_table *htab;
7499 htab = elf32_arm_hash_table (info);
7503 splt = htab->root.iplt;
7504 sgotplt = htab->root.igotplt;
7506 /* NaCl uses a special first entry in .iplt too. */
7507 if (htab->nacl_p && splt->size == 0)
7508 splt->size += htab->plt_header_size;
7510 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
7511 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
7515 splt = htab->root.splt;
7516 sgotplt = htab->root.sgotplt;
7518 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
7519 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
7521 /* If this is the first .plt entry, make room for the special
7523 if (splt->size == 0)
7524 splt->size += htab->plt_header_size;
7526 htab->next_tls_desc_index++;
7529 /* Allocate the PLT entry itself, including any leading Thumb stub. */
7530 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7531 splt->size += PLT_THUMB_STUB_SIZE;
7532 root_plt->offset = splt->size;
7533 splt->size += htab->plt_entry_size;
7535 if (!htab->symbian_p)
7537 /* We also need to make an entry in the .got.plt section, which
7538 will be placed in the .got section by the linker script. */
7540 arm_plt->got_offset = sgotplt->size;
7542 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
7548 arm_movw_immediate (bfd_vma value)
7550 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
7554 arm_movt_immediate (bfd_vma value)
7556 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
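/* Worked example (illustrative only): for value 0x12345678,
   arm_movw_immediate returns 0x00050678 (imm4 = 0x5 in bits 16-19,
   imm12 = 0x678 in bits 0-11, encoding the low halfword 0x5678) and
   arm_movt_immediate returns 0x00010234 (imm4 = 0x1, imm12 = 0x234,
   encoding the high halfword 0x1234), ready to be ORed into MOVW/MOVT
   instruction templates.  */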
7559 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
7560 the entry lives in .iplt and resolves to (*SYM_VALUE)().
7561 Otherwise, DYNINDX is the index of the symbol in the dynamic
7562 symbol table and SYM_VALUE is undefined.
7564 ROOT_PLT points to the offset of the PLT entry from the start of its
7565 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
7566 bookkeeping information.
7568 Returns FALSE if there was a problem. */
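/* The body below handles several PLT flavours, each with its own way of
   encoding the GOT displacement: Symbian OS entries, VxWorks shared and
   executable entries, Native Client entries, Thumb-2-only entries
   (PR ld/16017), and the classic short and long ARM entries.  */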
7571 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
7572 union gotplt_union *root_plt,
7573 struct arm_plt_info *arm_plt,
7574 int dynindx, bfd_vma sym_value)
7576 struct elf32_arm_link_hash_table *htab;
7582 Elf_Internal_Rela rel;
7583 bfd_vma plt_header_size;
7584 bfd_vma got_header_size;
7586 htab = elf32_arm_hash_table (info);
7588 /* Pick the appropriate sections and sizes. */
7591 splt = htab->root.iplt;
7592 sgot = htab->root.igotplt;
7593 srel = htab->root.irelplt;
7595 /* There are no reserved entries in .igot.plt, and no special
7596 first entry in .iplt. */
7597 got_header_size = 0;
7598 plt_header_size = 0;
7602 splt = htab->root.splt;
7603 sgot = htab->root.sgotplt;
7604 srel = htab->root.srelplt;
7606 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
7607 plt_header_size = htab->plt_header_size;
7609 BFD_ASSERT (splt != NULL && srel != NULL);
7611 /* Fill in the entry in the procedure linkage table. */
7612 if (htab->symbian_p)
7614 BFD_ASSERT (dynindx >= 0);
7615 put_arm_insn (htab, output_bfd,
7616 elf32_arm_symbian_plt_entry[0],
7617 splt->contents + root_plt->offset);
7618 bfd_put_32 (output_bfd,
7619 elf32_arm_symbian_plt_entry[1],
7620 splt->contents + root_plt->offset + 4);
7622 /* Fill in the entry in the .rel.plt section. */
7623 rel.r_offset = (splt->output_section->vma
7624 + splt->output_offset
7625 + root_plt->offset + 4);
7626 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
7628 /* Get the index in the procedure linkage table which
7629 corresponds to this symbol. This is the index of this symbol
7630 in all the symbols for which we are making plt entries. The
7631 first entry in the procedure linkage table is reserved. */
7632 plt_index = ((root_plt->offset - plt_header_size)
7633 / htab->plt_entry_size);
7637 bfd_vma got_offset, got_address, plt_address;
7638 bfd_vma got_displacement, initial_got_entry;
7641 BFD_ASSERT (sgot != NULL);
7643 /* Get the offset into the .(i)got.plt table of the entry that
7644 corresponds to this function. */
7645 got_offset = (arm_plt->got_offset & -2);
7647 /* Get the index in the procedure linkage table which
7648 corresponds to this symbol. This is the index of this symbol
7649 in all the symbols for which we are making plt entries.
7650 After the reserved .got.plt entries, all symbols appear in
7651 the same order as in .plt. */
7652 plt_index = (got_offset - got_header_size) / 4;
7654 /* Calculate the address of the GOT entry. */
7655 got_address = (sgot->output_section->vma
7656 + sgot->output_offset
7659 /* ...and the address of the PLT entry. */
7660 plt_address = (splt->output_section->vma
7661 + splt->output_offset
7662 + root_plt->offset);
7664 ptr = splt->contents + root_plt->offset;
7665 if (htab->vxworks_p && info->shared)
7670 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7672 val = elf32_arm_vxworks_shared_plt_entry[i];
7674 val |= got_address - sgot->output_section->vma;
7676 val |= plt_index * RELOC_SIZE (htab);
7677 if (i == 2 || i == 5)
7678 bfd_put_32 (output_bfd, val, ptr);
7680 put_arm_insn (htab, output_bfd, val, ptr);
7683 else if (htab->vxworks_p)
7688 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7690 val = elf32_arm_vxworks_exec_plt_entry[i];
7694 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
7696 val |= plt_index * RELOC_SIZE (htab);
7697 if (i == 2 || i == 5)
7698 bfd_put_32 (output_bfd, val, ptr);
7700 put_arm_insn (htab, output_bfd, val, ptr);
7703 loc = (htab->srelplt2->contents
7704 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
7706 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
7707 referencing the GOT for this PLT entry. */
7708 rel.r_offset = plt_address + 8;
7709 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
7710 rel.r_addend = got_offset;
7711 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7712 loc += RELOC_SIZE (htab);
7714 /* Create the R_ARM_ABS32 relocation referencing the
7715 beginning of the PLT for this GOT entry. */
7716 rel.r_offset = got_address;
7717 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
7719 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7721 else if (htab->nacl_p)
7723 /* Calculate the displacement between the PLT slot and the
7724 common tail that's part of the special initial PLT slot. */
7725 int32_t tail_displacement
7726 = ((splt->output_section->vma + splt->output_offset
7727 + ARM_NACL_PLT_TAIL_OFFSET)
7728 - (plt_address + htab->plt_entry_size + 4));
7729 BFD_ASSERT ((tail_displacement & 3) == 0);
7730 tail_displacement >>= 2;
7732 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
7733 || (-tail_displacement & 0xff000000) == 0);
7735 /* Calculate the displacement between the PLT slot and the entry
7736 in the GOT. The offset accounts for the value produced by
7737 adding to pc in the penultimate instruction of the PLT stub. */
7738 got_displacement = (got_address
7739 - (plt_address + htab->plt_entry_size));
7741 /* NaCl does not support interworking at all. */
7742 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
7744 put_arm_insn (htab, output_bfd,
7745 elf32_arm_nacl_plt_entry[0]
7746 | arm_movw_immediate (got_displacement),
7748 put_arm_insn (htab, output_bfd,
7749 elf32_arm_nacl_plt_entry[1]
7750 | arm_movt_immediate (got_displacement),
7752 put_arm_insn (htab, output_bfd,
7753 elf32_arm_nacl_plt_entry[2],
7755 put_arm_insn (htab, output_bfd,
7756 elf32_arm_nacl_plt_entry[3]
7757 | (tail_displacement & 0x00ffffff),
7760 else if (using_thumb_only (htab))
7762 /* PR ld/16017: Generate thumb only PLT entries. */
7763 if (!using_thumb2 (htab))
7765 /* FIXME: We ought to be able to generate thumb-1 PLT
7767 _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
7772 /* Calculate the displacement between the PLT slot and the entry in
7773 the GOT. The 12-byte offset accounts for the value produced by
7774 adding to pc in the 3rd instruction of the PLT stub. */
7775 got_displacement = got_address - (plt_address + 12);
7777 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
7778 instead of 'put_thumb_insn'. */
7779 put_arm_insn (htab, output_bfd,
7780 elf32_thumb2_plt_entry[0]
7781 | ((got_displacement & 0x000000ff) << 16)
7782 | ((got_displacement & 0x00000700) << 20)
7783 | ((got_displacement & 0x00000800) >> 1)
7784 | ((got_displacement & 0x0000f000) >> 12),
7786 put_arm_insn (htab, output_bfd,
7787 elf32_thumb2_plt_entry[1]
7788 | ((got_displacement & 0x00ff0000) )
7789 | ((got_displacement & 0x07000000) << 4)
7790 | ((got_displacement & 0x08000000) >> 17)
7791 | ((got_displacement & 0xf0000000) >> 28),
7793 put_arm_insn (htab, output_bfd,
7794 elf32_thumb2_plt_entry[2],
7796 put_arm_insn (htab, output_bfd,
7797 elf32_thumb2_plt_entry[3],
7802 /* Calculate the displacement between the PLT slot and the
7803 entry in the GOT. The eight-byte offset accounts for the
7804 value produced by adding to pc in the first instruction
7806 got_displacement = got_address - (plt_address + 8);
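/* Descriptive note: the short entry written below is the familiar
   three-instruction sequence "add ip, pc, #G0; add ip, ip, #G1;
   ldr pc, [ip, #G2]!", where G0, G1 and G2 are the top, middle and low
   parts of got_displacement selected by the masks below.  */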
7808 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7810 put_thumb_insn (htab, output_bfd,
7811 elf32_arm_plt_thumb_stub[0], ptr - 4);
7812 put_thumb_insn (htab, output_bfd,
7813 elf32_arm_plt_thumb_stub[1], ptr - 2);
7816 if (!elf32_arm_use_long_plt_entry)
7818 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
7820 put_arm_insn (htab, output_bfd,
7821 elf32_arm_plt_entry_short[0]
7822 | ((got_displacement & 0x0ff00000) >> 20),
7824 put_arm_insn (htab, output_bfd,
7825 elf32_arm_plt_entry_short[1]
7826 | ((got_displacement & 0x000ff000) >> 12),
7828 put_arm_insn (htab, output_bfd,
7829 elf32_arm_plt_entry_short[2]
7830 | (got_displacement & 0x00000fff),
7832 #ifdef FOUR_WORD_PLT
7833 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
7838 put_arm_insn (htab, output_bfd,
7839 elf32_arm_plt_entry_long[0]
7840 | ((got_displacement & 0xf0000000) >> 28),
7842 put_arm_insn (htab, output_bfd,
7843 elf32_arm_plt_entry_long[1]
7844 | ((got_displacement & 0x0ff00000) >> 20),
7846 put_arm_insn (htab, output_bfd,
7847 elf32_arm_plt_entry_long[2]
7848 | ((got_displacement & 0x000ff000) >> 12),
7850 put_arm_insn (htab, output_bfd,
7851 elf32_arm_plt_entry_long[3]
7852 | (got_displacement & 0x00000fff),
7857 /* Fill in the entry in the .rel(a).(i)plt section. */
7858 rel.r_offset = got_address;
7862 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
7863 The dynamic linker or static executable then calls SYM_VALUE
7864 to determine the correct run-time value of the .igot.plt entry. */
7865 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
7866 initial_got_entry = sym_value;
7870 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
7871 initial_got_entry = (splt->output_section->vma
7872 + splt->output_offset);
7875 /* Fill in the entry in the global offset table. */
7876 bfd_put_32 (output_bfd, initial_got_entry,
7877 sgot->contents + got_offset);
7881 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
7884 loc = srel->contents + plt_index * RELOC_SIZE (htab);
7885 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7891 /* Some relocations map to different relocations depending on the
7892 target. Return the real relocation. */
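/* Concretely (from the logic below): R_ARM_TARGET1 becomes either R_ARM_REL32
   or R_ARM_ABS32 depending on the target1_is_rel setting recorded by
   bfd_elf32_arm_set_target_relocs, and R_ARM_TARGET2 becomes whichever
   relocation the TARGET2 type selected there (rel, abs or got-rel).  */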
7895 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
7901 if (globals->target1_is_rel)
7907 return globals->target2_reloc;
7914 /* Return the base VMA address which should be subtracted from real addresses
7915 when resolving @dtpoff relocation.
7916 This is PT_TLS segment p_vaddr. */
7919 dtpoff_base (struct bfd_link_info *info)
7921 /* If tls_sec is NULL, we should have signalled an error already. */
7922 if (elf_hash_table (info)->tls_sec == NULL)
7924 return elf_hash_table (info)->tls_sec->vma;
7927 /* Return the relocation value for @tpoff relocation
7928 if STT_TLS virtual address is ADDRESS. */
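/* Descriptive note: ARM uses TLS variant 1, so the thread pointer addresses
   a fixed-size TCB with the TLS block following it; the @tpoff value is
   therefore the offset of ADDRESS within the TLS template plus TCB_SIZE
   rounded up to the template's alignment, as computed below.  */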
7931 tpoff (struct bfd_link_info *info, bfd_vma address)
7933 struct elf_link_hash_table *htab = elf_hash_table (info);
7936 /* If tls_sec is NULL, we should have signalled an error already. */
7937 if (htab->tls_sec == NULL)
7939 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
7940 return address - htab->tls_sec->vma + base;
7943 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
7944 VALUE is the relocation value. */
7946 static bfd_reloc_status_type
7947 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
7950 return bfd_reloc_overflow;
7952 value |= bfd_get_32 (abfd, data) & 0xfffff000;
7953 bfd_put_32 (abfd, value, data);
7954 return bfd_reloc_ok;
7957 /* Handle TLS relaxations. Relaxing is possible for symbols that use
7958 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
7959 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
7961 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
7962 is to then call final_link_relocate. Return other values in the
7965 FIXME: When --emit-relocs is in effect, we'll emit relocs describing
7966 the pre-relaxed code. It would be nice if the relocs were updated
7967 to match the optimization. */
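/* Broad summary of the cases handled below: the R_ARM_TLS_CALL site is
   rewritten to a nop for local symbols or to "ldr r0, [pc, r0]" otherwise,
   and the {THM_,}TLS_DESCSEQ add/ldr/blx instructions are rewritten to nops
   or plain register moves so that the inline sequence produces the TLS
   offset directly.  */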
7969 static bfd_reloc_status_type
7970 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
7971 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
7972 Elf_Internal_Rela *rel, unsigned long is_local)
7976 switch (ELF32_R_TYPE (rel->r_info))
7979 return bfd_reloc_notsupported;
7981 case R_ARM_TLS_GOTDESC:
7986 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7988 insn -= 5; /* THUMB */
7990 insn -= 8; /* ARM */
7992 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7993 return bfd_reloc_continue;
7995 case R_ARM_THM_TLS_DESCSEQ:
7997 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
7998 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
8002 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8004 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
8008 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8011 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
8013 else if ((insn & 0xff87) == 0x4780) /* blx rx */
8017 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8020 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
8021 contents + rel->r_offset);
8025 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
8026 /* It's a 32 bit instruction, fetch the rest of it for
8027 error generation. */
8029 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
8030 (*_bfd_error_handler)
8031 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
8032 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
8033 return bfd_reloc_notsupported;
8037 case R_ARM_TLS_DESCSEQ:
8039 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
8040 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
8044 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
8045 contents + rel->r_offset);
8047 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
8051 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
8054 bfd_put_32 (input_bfd, insn & 0xfffff000,
8055 contents + rel->r_offset);
8057 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
8061 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
8064 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
8065 contents + rel->r_offset);
8069 (*_bfd_error_handler)
8070 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
8071 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
8072 return bfd_reloc_notsupported;
8076 case R_ARM_TLS_CALL:
8077 /* GD->IE relaxation, turn the instruction into 'nop' or
8078 'ldr r0, [pc,r0]' */
8079 insn = is_local ? 0xe1a00000 : 0xe79f0000;
8080 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
8083 case R_ARM_THM_TLS_CALL:
8084 /* GD->IE relaxation. */
8086 /* add r0,pc; ldr r0, [r0] */
8088 else if (arch_has_thumb2_nop (globals))
8095 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
8096 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
8099 return bfd_reloc_ok;
8102 /* For a given value of n, calculate the value of G_n as required to
8103 deal with group relocations. We return it in the form of an
8104 encoded constant-and-rotation, together with the final residual. If n is
8105 specified as less than zero, then final_residual is filled with the
8106 input value and no further action is performed. */
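/* Worked example (illustrative only): for value 0x12345 the first pass masks
   an 8-bit group starting at the most significant pair of bits, giving
   G_0 = 0x12000, encoded as constant 0x48 with rotation field 0xb (since
   0x48 ror 22 == 0x12000), and residual 0x345; a second pass would give
   G_1 = 0x344 (encoded 0xfd1) with final residual 0x1.  */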
8109 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
8113 bfd_vma encoded_g_n = 0;
8114 bfd_vma residual = value; /* Also known as Y_n. */
8116 for (current_n = 0; current_n <= n; current_n++)
8120 /* Calculate which part of the value to mask. */
8127 /* Determine the most significant bit in the residual and
8128 align the resulting value to a 2-bit boundary. */
8129 for (msb = 30; msb >= 0; msb -= 2)
8130 if (residual & (3 << msb))
8133 /* The desired shift is now (msb - 6), or zero, whichever
8140 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
8141 g_n = residual & (0xff << shift);
8142 encoded_g_n = (g_n >> shift)
8143 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
8145 /* Calculate the residual for the next time around. */
8149 *final_residual = residual;
8154 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
8155 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
8158 identify_add_or_sub (bfd_vma insn)
8160 int opcode = insn & 0x1e00000;
8162 if (opcode == 1 << 23) /* ADD */
8165 if (opcode == 1 << 22) /* SUB */
8171 /* Perform a relocation as part of a final link. */
8173 static bfd_reloc_status_type
8174 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8177 asection * input_section,
8178 bfd_byte * contents,
8179 Elf_Internal_Rela * rel,
8181 struct bfd_link_info * info,
8183 const char * sym_name,
8184 unsigned char st_type,
8185 enum arm_st_branch_type branch_type,
8186 struct elf_link_hash_entry * h,
8187 bfd_boolean * unresolved_reloc_p,
8188 char ** error_message)
8190 unsigned long r_type = howto->type;
8191 unsigned long r_symndx;
8192 bfd_byte * hit_data = contents + rel->r_offset;
8193 bfd_vma * local_got_offsets;
8194 bfd_vma * local_tlsdesc_gotents;
8197 asection * sreloc = NULL;
8200 bfd_signed_vma signed_addend;
8201 unsigned char dynreloc_st_type;
8202 bfd_vma dynreloc_value;
8203 struct elf32_arm_link_hash_table * globals;
8204 struct elf32_arm_link_hash_entry *eh;
8205 union gotplt_union *root_plt;
8206 struct arm_plt_info *arm_plt;
8208 bfd_vma gotplt_offset;
8209 bfd_boolean has_iplt_entry;
8211 globals = elf32_arm_hash_table (info);
8212 if (globals == NULL)
8213 return bfd_reloc_notsupported;
8215 BFD_ASSERT (is_arm_elf (input_bfd));
8217 /* Some relocation types map to different relocations depending on the
8218 target. We pick the right one here. */
8219 r_type = arm_real_reloc_type (globals, r_type);
8221 /* It is possible to have linker relaxations on some TLS access
8222 models. Update our information here. */
8223 r_type = elf32_arm_tls_transition (info, r_type, h);
8225 if (r_type != howto->type)
8226 howto = elf32_arm_howto_from_type (r_type);
8228 /* If the start address has been set, then set the EF_ARM_HASENTRY
8229 flag. Setting this more than once is redundant, but the cost is
8230 not too high, and it keeps the code simple.
8232 The test is done here, rather than somewhere else, because the
8233 start address is only set just before the final link commences.
8235 Note - if the user deliberately sets a start address of 0, the
8236 flag will not be set. */
8237 if (bfd_get_start_address (output_bfd) != 0)
8238 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
8240 eh = (struct elf32_arm_link_hash_entry *) h;
8241 sgot = globals->root.sgot;
8242 local_got_offsets = elf_local_got_offsets (input_bfd);
8243 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8245 if (globals->root.dynamic_sections_created)
8246 srelgot = globals->root.srelgot;
8250 r_symndx = ELF32_R_SYM (rel->r_info);
8252 if (globals->use_rel)
8254 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8256 if (addend & ((howto->src_mask + 1) >> 1))
8259 signed_addend &= ~ howto->src_mask;
8260 signed_addend |= addend;
8263 signed_addend = addend;
8266 addend = signed_addend = rel->r_addend;
8268 /* ST_BRANCH_TO_ARM is meaningless for Thumb-only targets when we
8269 are resolving a function call relocation. */
8270 if (using_thumb_only (globals)
8271 && (r_type == R_ARM_THM_CALL
8272 || r_type == R_ARM_THM_JUMP24)
8273 && branch_type == ST_BRANCH_TO_ARM)
8274 branch_type = ST_BRANCH_TO_THUMB;
8276 /* Record the symbol information that should be used in dynamic
8278 dynreloc_st_type = st_type;
8279 dynreloc_value = value;
8280 if (branch_type == ST_BRANCH_TO_THUMB)
8281 dynreloc_value |= 1;
8283 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8284 VALUE appropriately for relocations that we resolve at link time. */
8285 has_iplt_entry = FALSE;
8286 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8287 && root_plt->offset != (bfd_vma) -1)
8289 plt_offset = root_plt->offset;
8290 gotplt_offset = arm_plt->got_offset;
8292 if (h == NULL || eh->is_iplt)
8294 has_iplt_entry = TRUE;
8295 splt = globals->root.iplt;
8297 /* Populate .iplt entries here, because not all of them will
8298 be seen by finish_dynamic_symbol. The lower bit is set if
8299 we have already populated the entry. */
8304 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
8305 -1, dynreloc_value))
8306 root_plt->offset |= 1;
8308 return bfd_reloc_notsupported;
8311 /* Static relocations always resolve to the .iplt entry. */
8313 value = (splt->output_section->vma
8314 + splt->output_offset
8316 branch_type = ST_BRANCH_TO_ARM;
8318 /* If there are non-call relocations that resolve to the .iplt
8319 entry, then all dynamic ones must too. */
8320 if (arm_plt->noncall_refcount != 0)
8322 dynreloc_st_type = st_type;
8323 dynreloc_value = value;
8327 /* We populate the .plt entry in finish_dynamic_symbol. */
8328 splt = globals->root.splt;
8333 plt_offset = (bfd_vma) -1;
8334 gotplt_offset = (bfd_vma) -1;
8340 /* We don't need to find a value for this symbol. It's just a
8342 *unresolved_reloc_p = FALSE;
8343 return bfd_reloc_ok;
8346 if (!globals->vxworks_p)
8347 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8351 case R_ARM_ABS32_NOI:
8353 case R_ARM_REL32_NOI:
8359 /* Handle relocations which should use the PLT entry. ABS32/REL32
8360 will use the symbol's value, which may point to a PLT entry, but we
8361 don't need to handle that here. If we created a PLT entry, all
8362 branches in this object should go to it, except if the PLT is too
8363 far away, in which case a long branch stub should be inserted. */
8364 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
8365 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
8366 && r_type != R_ARM_CALL
8367 && r_type != R_ARM_JUMP24
8368 && r_type != R_ARM_PLT32)
8369 && plt_offset != (bfd_vma) -1)
8371 /* If we've created a .plt section, and assigned a PLT entry
8372 to this function, it must either be a STT_GNU_IFUNC reference
8373 or not be known to bind locally. In other cases, we should
8374 have cleared the PLT entry by now. */
8375 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8377 value = (splt->output_section->vma
8378 + splt->output_offset
8380 *unresolved_reloc_p = FALSE;
8381 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8382 contents, rel->r_offset, value,
8386 /* When generating a shared object or relocatable executable, these
8387 relocations are copied into the output file to be resolved at
8389 if ((info->shared || globals->root.is_relocatable_executable)
8390 && (input_section->flags & SEC_ALLOC)
8391 && !(globals->vxworks_p
8392 && strcmp (input_section->output_section->name,
8394 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
8395 || !SYMBOL_CALLS_LOCAL (info, h))
8396 && !(input_bfd == globals->stub_bfd
8397 && strstr (input_section->name, STUB_SUFFIX))
8399 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8400 || h->root.type != bfd_link_hash_undefweak)
8401 && r_type != R_ARM_PC24
8402 && r_type != R_ARM_CALL
8403 && r_type != R_ARM_JUMP24
8404 && r_type != R_ARM_PREL31
8405 && r_type != R_ARM_PLT32)
8407 Elf_Internal_Rela outrel;
8408 bfd_boolean skip, relocate;
8410 *unresolved_reloc_p = FALSE;
8412 if (sreloc == NULL && globals->root.dynamic_sections_created)
8414 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
8415 ! globals->use_rel);
8418 return bfd_reloc_notsupported;
8424 outrel.r_addend = addend;
8426 _bfd_elf_section_offset (output_bfd, info, input_section,
8428 if (outrel.r_offset == (bfd_vma) -1)
8430 else if (outrel.r_offset == (bfd_vma) -2)
8431 skip = TRUE, relocate = TRUE;
8432 outrel.r_offset += (input_section->output_section->vma
8433 + input_section->output_offset);
8436 memset (&outrel, 0, sizeof outrel);
8441 || !h->def_regular))
8442 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
8447 /* This symbol is local, or marked to become local. */
8448 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
8449 if (globals->symbian_p)
8453 /* On Symbian OS, the data segment and text segment
8454 can be relocated independently. Therefore, we
8455 must indicate the segment to which this
8456 relocation is relative. The BPABI allows us to
8457 use any symbol in the right segment; we just use
8458 the section symbol as it is convenient. (We
8459 cannot use the symbol given by "h" directly as it
8460 will not appear in the dynamic symbol table.)
8462 Note that the dynamic linker ignores the section
8463 symbol value, so we don't subtract osec->vma
8464 from the emitted reloc addend. */
8466 osec = sym_sec->output_section;
8468 osec = input_section->output_section;
8469 symbol = elf_section_data (osec)->dynindx;
8472 struct elf_link_hash_table *htab = elf_hash_table (info);
8474 if ((osec->flags & SEC_READONLY) == 0
8475 && htab->data_index_section != NULL)
8476 osec = htab->data_index_section;
8478 osec = htab->text_index_section;
8479 symbol = elf_section_data (osec)->dynindx;
8481 BFD_ASSERT (symbol != 0);
8484 /* On SVR4-ish systems, the dynamic loader cannot
8485 relocate the text and data segments independently,
8486 so the symbol does not matter. */
8488 if (dynreloc_st_type == STT_GNU_IFUNC)
8489 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
8490 to the .iplt entry. Instead, every non-call reference
8491 must use an R_ARM_IRELATIVE relocation to obtain the
8492 correct run-time address. */
8493 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
8495 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
8496 if (globals->use_rel)
8499 outrel.r_addend += dynreloc_value;
8502 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
8504 /* If this reloc is against an external symbol, we do not want to
8505 fiddle with the addend. Otherwise, we need to include the symbol
8506 value so that it becomes an addend for the dynamic reloc. */
8508 return bfd_reloc_ok;
8510 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8511 contents, rel->r_offset,
8512 dynreloc_value, (bfd_vma) 0);
8514 else switch (r_type)
8517 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8519 case R_ARM_XPC25: /* Arm BLX instruction. */
8522 case R_ARM_PC24: /* Arm B/BL instruction. */
8525 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
8527 if (r_type == R_ARM_XPC25)
8529 /* Check for Arm calling Arm function. */
8530 /* FIXME: Should we translate the instruction into a BL
8531 instruction instead ? */
8532 if (branch_type != ST_BRANCH_TO_THUMB)
8533 (*_bfd_error_handler)
8534 (_("%B: Warning: Arm BLX instruction targets Arm function '%s'."),
8536 h ? h->root.root.string : "(local)");
8538 else if (r_type == R_ARM_PC24)
8540 /* Check for Arm calling Thumb function. */
8541 if (branch_type == ST_BRANCH_TO_THUMB)
8543 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
8544 output_bfd, input_section,
8545 hit_data, sym_sec, rel->r_offset,
8546 signed_addend, value,
8548 return bfd_reloc_ok;
8550 return bfd_reloc_dangerous;
8554 /* Check if a stub has to be inserted because the
8555 destination is too far or we are changing mode. */
8556 if ( r_type == R_ARM_CALL
8557 || r_type == R_ARM_JUMP24
8558 || r_type == R_ARM_PLT32)
8560 enum elf32_arm_stub_type stub_type = arm_stub_none;
8561 struct elf32_arm_link_hash_entry *hash;
8563 hash = (struct elf32_arm_link_hash_entry *) h;
8564 stub_type = arm_type_of_stub (info, input_section, rel,
8565 st_type, &branch_type,
8566 hash, value, sym_sec,
8567 input_bfd, sym_name);
8569 if (stub_type != arm_stub_none)
8571 /* The target is out of reach, so redirect the
8572 branch to the local stub for this function. */
8573 stub_entry = elf32_arm_get_stub_entry (input_section,
8578 if (stub_entry != NULL)
8579 value = (stub_entry->stub_offset
8580 + stub_entry->stub_sec->output_offset
8581 + stub_entry->stub_sec->output_section->vma);
8583 if (plt_offset != (bfd_vma) -1)
8584 *unresolved_reloc_p = FALSE;
8589 /* If the call goes through a PLT entry, make sure to
8590 check distance to the right destination address. */
8591 if (plt_offset != (bfd_vma) -1)
8593 value = (splt->output_section->vma
8594 + splt->output_offset
8596 *unresolved_reloc_p = FALSE;
8597 /* The PLT entry is in ARM mode, regardless of the
8599 branch_type = ST_BRANCH_TO_ARM;
8604 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
8606 S is the address of the symbol in the relocation.
8607 P is address of the instruction being relocated.
8608 A is the addend (extracted from the instruction) in bytes.
8610 S is held in 'value'.
8611 P is the base address of the section containing the
8612 instruction plus the offset of the reloc into that
8614 (input_section->output_section->vma +
8615 input_section->output_offset +
8617 A is the addend, converted into bytes, ie:
8620 Note: None of these operations have knowledge of the pipeline
8621 size of the processor, thus it is up to the assembler to
8622 encode this information into the addend. */
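/* For example, under a REL link an ARM "bl" whose target is its own
   address is assembled with an addend of -8, compensating for the
   two-instruction prefetch that the hardware includes in P.  */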
8623 value -= (input_section->output_section->vma
8624 + input_section->output_offset);
8625 value -= rel->r_offset;
8626 if (globals->use_rel)
8627 value += (signed_addend << howto->size);
8629 /* RELA addends do not have to be adjusted by howto->size. */
8630 value += signed_addend;
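/* VALUE now holds the byte displacement; the howto's right shift
   (two for these ARM branch relocs) converts it back into the word
   units stored in the instruction before the range check below.  */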
8632 signed_addend = value;
8633 signed_addend >>= howto->rightshift;
8635 /* A branch to an undefined weak symbol is turned into a jump to
8636 the next instruction unless a PLT entry will be created.
8637 Do the same for local undefined symbols (but not for STN_UNDEF).
8638 The jump to the next instruction is optimized as a NOP depending
8639 on the architecture. */
8640 if (h ? (h->root.type == bfd_link_hash_undefweak
8641 && plt_offset == (bfd_vma) -1)
8642 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
8644 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
8646 if (arch_has_arm_nop (globals))
8647 value |= 0x0320f000;
8649 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
8653 /* Perform a signed range check. */
8654 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
8655 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
8656 return bfd_reloc_overflow;
8658 addend = (value & 2);
8660 value = (signed_addend & howto->dst_mask)
8661 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
8663 if (r_type == R_ARM_CALL)
8665 /* Set the H bit in the BLX instruction. */
8666 if (branch_type == ST_BRANCH_TO_THUMB)
8671 value &= ~(bfd_vma)(1 << 24);
8674 /* Select the correct instruction (BL or BLX). */
8675 /* Only if we are not handling a BL to a stub. In this
8676 case, mode switching is performed by the stub. */
8677 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
8679 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
8681 value &= ~(bfd_vma)(1 << 28);
8691 if (branch_type == ST_BRANCH_TO_THUMB)
8695 case R_ARM_ABS32_NOI:
8701 if (branch_type == ST_BRANCH_TO_THUMB)
8703 value -= (input_section->output_section->vma
8704 + input_section->output_offset + rel->r_offset);
8707 case R_ARM_REL32_NOI:
8709 value -= (input_section->output_section->vma
8710 + input_section->output_offset + rel->r_offset);
8714 value -= (input_section->output_section->vma
8715 + input_section->output_offset + rel->r_offset);
8716 value += signed_addend;
8717 if (! h || h->root.type != bfd_link_hash_undefweak)
8719 /* Check for overflow. */
8720 if ((value ^ (value >> 1)) & (1 << 30))
8721 return bfd_reloc_overflow;
8723 value &= 0x7fffffff;
8724 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
8725 if (branch_type == ST_BRANCH_TO_THUMB)
8730 bfd_put_32 (input_bfd, value, hit_data);
8731 return bfd_reloc_ok;
8734 /* PR 16202: Refetch the addend using the correct size. */
8735 if (globals->use_rel)
8736 addend = bfd_get_8 (input_bfd, hit_data);
8739 /* There is no way to tell whether the user intended to use a signed or
8740 unsigned addend. When checking for overflow we accept either,
8741 as specified by the AAELF. */
8742 if ((long) value > 0xff || (long) value < -0x80)
8743 return bfd_reloc_overflow;
8745 bfd_put_8 (input_bfd, value, hit_data);
8746 return bfd_reloc_ok;
8749 /* PR 16202: Refetch the addend using the correct size. */
8750 if (globals->use_rel)
8751 addend = bfd_get_16 (input_bfd, hit_data);
8754 /* See comment for R_ARM_ABS8. */
8755 if ((long) value > 0xffff || (long) value < -0x8000)
8756 return bfd_reloc_overflow;
8758 bfd_put_16 (input_bfd, value, hit_data);
8759 return bfd_reloc_ok;
8761 case R_ARM_THM_ABS5:
8762 /* Support ldr and str instructions for the thumb. */
8763 if (globals->use_rel)
8765 /* Need to refetch addend. */
8766 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8767 /* ??? Need to determine shift amount from operand size. */
8768 addend >>= howto->rightshift;
8772 /* ??? Isn't value unsigned? */
8773 if ((long) value > 0x1f || (long) value < -0x10)
8774 return bfd_reloc_overflow;
8776 /* ??? Value needs to be properly shifted into place first. */
8777 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
8778 bfd_put_16 (input_bfd, value, hit_data);
8779 return bfd_reloc_ok;
8781 case R_ARM_THM_ALU_PREL_11_0:
8782 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
8785 bfd_signed_vma relocation;
8787 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8788 | bfd_get_16 (input_bfd, hit_data + 2);
8790 if (globals->use_rel)
8792 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
8793 | ((insn & (1 << 26)) >> 15);
8794 if (insn & 0xf00000)
8795 signed_addend = -signed_addend;
8798 relocation = value + signed_addend;
8799 relocation -= Pa (input_section->output_section->vma
8800 + input_section->output_offset
8803 value = abs (relocation);
8805 if (value >= 0x1000)
8806 return bfd_reloc_overflow;
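/* Re-encode the 12-bit result into the Thumb-2 ADDW/SUBW immediate
   fields: imm8 in bits 0-7, imm3 in bits 12-14 and the i bit at
   bit 26 of the combined 32-bit instruction word.  */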
8808 insn = (insn & 0xfb0f8f00) | (value & 0xff)
8809 | ((value & 0x700) << 4)
8810 | ((value & 0x800) << 15);
8814 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8815 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8817 return bfd_reloc_ok;
8821 /* PR 10073: This reloc is not generated by the GNU toolchain,
8822 but it is supported for compatibility with third party libraries
8823 generated by other compilers, specifically the ARM/IAR. */
8826 bfd_signed_vma relocation;
8828 insn = bfd_get_16 (input_bfd, hit_data);
8830 if (globals->use_rel)
8831 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
8833 relocation = value + addend;
8834 relocation -= Pa (input_section->output_section->vma
8835 + input_section->output_offset
8838 value = abs (relocation);
8840 /* We do not check for overflow of this reloc. Although strictly
8841 speaking this is incorrect, it appears to be necessary in order
8842 to work with IAR generated relocs. Since GCC and GAS do not
8843 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
8844 a problem for them. */
8847 insn = (insn & 0xff00) | (value >> 2);
8849 bfd_put_16 (input_bfd, insn, hit_data);
8851 return bfd_reloc_ok;
8854 case R_ARM_THM_PC12:
8855 /* Corresponds to: ldr.w reg, [pc, #offset]. */
8858 bfd_signed_vma relocation;
8860 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8861 | bfd_get_16 (input_bfd, hit_data + 2);
8863 if (globals->use_rel)
8865 signed_addend = insn & 0xfff;
8866 if (!(insn & (1 << 23)))
8867 signed_addend = -signed_addend;
8870 relocation = value + signed_addend;
8871 relocation -= Pa (input_section->output_section->vma
8872 + input_section->output_offset
8875 value = abs (relocation);
8877 if (value >= 0x1000)
8878 return bfd_reloc_overflow;
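/* Write the 12-bit offset back into the load and, when the computed
   displacement is non-negative, set the U (add) bit, bit 23 of the
   instruction.  */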
8880 insn = (insn & 0xff7ff000) | value;
8881 if (relocation >= 0)
8884 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8885 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8887 return bfd_reloc_ok;
8890 case R_ARM_THM_XPC22:
8891 case R_ARM_THM_CALL:
8892 case R_ARM_THM_JUMP24:
8893 /* Thumb BL (branch long instruction). */
8897 bfd_boolean overflow = FALSE;
8898 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8899 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8900 bfd_signed_vma reloc_signed_max;
8901 bfd_signed_vma reloc_signed_min;
8903 bfd_signed_vma signed_check;
8905 const int thumb2 = using_thumb2 (globals);
8907 /* A branch to an undefined weak symbol is turned into a jump to
8908 the next instruction unless a PLT entry will be created.
8909 The jump to the next instruction is optimized as a NOP.W for
8910 Thumb-2 enabled architectures. */
8911 if (h && h->root.type == bfd_link_hash_undefweak
8912 && plt_offset == (bfd_vma) -1)
8914 if (arch_has_thumb2_nop (globals))
8916 bfd_put_16 (input_bfd, 0xf3af, hit_data);
8917 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
8921 bfd_put_16 (input_bfd, 0xe000, hit_data);
8922 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
8924 return bfd_reloc_ok;
8927 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
8928 with Thumb-1) involving the J1 and J2 bits. */
8929 if (globals->use_rel)
8931 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
8932 bfd_vma upper = upper_insn & 0x3ff;
8933 bfd_vma lower = lower_insn & 0x7ff;
8934 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
8935 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
8936 bfd_vma i1 = j1 ^ s ? 0 : 1;
8937 bfd_vma i2 = j2 ^ s ? 0 : 1;
8939 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
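/* ADDEND now holds the magnitude of the 25-bit branch offset; the
   next statement sign-extends it, so a set S bit yields a negative
   offset.  */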
8941 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
8943 signed_addend = addend;
8946 if (r_type == R_ARM_THM_XPC22)
8948 /* Check for Thumb to Thumb call. */
8949 /* FIXME: Should we translate the instruction into a BL
8950 instruction instead ? */
8951 if (branch_type == ST_BRANCH_TO_THUMB)
8952 (*_bfd_error_handler)
8953 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
8955 h ? h->root.root.string : "(local)");
8959 /* If it is not a call to Thumb, assume call to Arm.
8960 If it is a call relative to a section name, then it is not a
8961 function call at all, but rather a long jump. Calls through
8962 the PLT do not require stubs. */
8963 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
8965 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8967 /* Convert BL to BLX. */
8968 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8970 else if (( r_type != R_ARM_THM_CALL)
8971 && (r_type != R_ARM_THM_JUMP24))
8973 if (elf32_thumb_to_arm_stub
8974 (info, sym_name, input_bfd, output_bfd, input_section,
8975 hit_data, sym_sec, rel->r_offset, signed_addend, value,
8977 return bfd_reloc_ok;
8979 return bfd_reloc_dangerous;
8982 else if (branch_type == ST_BRANCH_TO_THUMB
8984 && r_type == R_ARM_THM_CALL)
8986 /* Make sure this is a BL. */
8987 lower_insn |= 0x1800;
8991 enum elf32_arm_stub_type stub_type = arm_stub_none;
8992 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
8994 /* Check if a stub has to be inserted because the destination
8996 struct elf32_arm_stub_hash_entry *stub_entry;
8997 struct elf32_arm_link_hash_entry *hash;
8999 hash = (struct elf32_arm_link_hash_entry *) h;
9001 stub_type = arm_type_of_stub (info, input_section, rel,
9002 st_type, &branch_type,
9003 hash, value, sym_sec,
9004 input_bfd, sym_name);
9006 if (stub_type != arm_stub_none)
9008 /* The target is out of reach or we are changing modes, so
9009 redirect the branch to the local stub for this
9011 stub_entry = elf32_arm_get_stub_entry (input_section,
9015 if (stub_entry != NULL)
9017 value = (stub_entry->stub_offset
9018 + stub_entry->stub_sec->output_offset
9019 + stub_entry->stub_sec->output_section->vma);
9021 if (plt_offset != (bfd_vma) -1)
9022 *unresolved_reloc_p = FALSE;
9025 /* If this call becomes a call to Arm, force BLX. */
9026 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9029 && !arm_stub_is_thumb (stub_entry->stub_type))
9030 || branch_type != ST_BRANCH_TO_THUMB)
9031 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9036 /* Handle calls via the PLT. */
9037 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9039 value = (splt->output_section->vma
9040 + splt->output_offset
9043 if (globals->use_blx
9044 && r_type == R_ARM_THM_CALL
9045 && ! using_thumb_only (globals))
9047 /* If the Thumb BLX instruction is available, convert
9048 the BL to a BLX instruction to call the ARM-mode
9050 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9051 branch_type = ST_BRANCH_TO_ARM;
9055 if (! using_thumb_only (globals))
9056 /* Target the Thumb stub before the ARM PLT entry. */
9057 value -= PLT_THUMB_STUB_SIZE;
9058 branch_type = ST_BRANCH_TO_THUMB;
9060 *unresolved_reloc_p = FALSE;
9063 relocation = value + signed_addend;
9065 relocation -= (input_section->output_section->vma
9066 + input_section->output_offset
9069 check = relocation >> howto->rightshift;
9071 /* If this is a signed value, the rightshift just dropped
9072 leading 1 bits (assuming twos complement). */
9073 if ((bfd_signed_vma) relocation >= 0)
9074 signed_check = check;
9076 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
9078 /* Calculate the permissible maximum and minimum values for
9079 this relocation according to whether we're relocating for
9081 bitsize = howto->bitsize;
9084 reloc_signed_max = (1 << (bitsize - 1)) - 1;
9085 reloc_signed_min = ~reloc_signed_max;
9087 /* Assumes two's complement. */
9088 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9091 if ((lower_insn & 0x5000) == 0x4000)
9092 /* For a BLX instruction, make sure that the relocation is rounded up
9093 to a word boundary. This follows the semantics of the instruction
9094 which specifies that bit 1 of the target address will come from bit
9095 1 of the base address. */
9096 relocation = (relocation + 2) & ~ 3;
9098 /* Put RELOCATION back into the insn. Assumes two's complement.
9099 We use the Thumb-2 encoding, which is safe even if dealing with
9100 a Thumb-1 instruction by virtue of our overflow check above. */
9101 reloc_sign = (signed_check < 0) ? 1 : 0;
9102 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
9103 | ((relocation >> 12) & 0x3ff)
9104 | (reloc_sign << 10);
9105 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
9106 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
9107 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
9108 | ((relocation >> 1) & 0x7ff);
9110 /* Put the relocated value back in the object file: */
9111 bfd_put_16 (input_bfd, upper_insn, hit_data);
9112 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9114 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9118 case R_ARM_THM_JUMP19:
9119 /* Thumb32 conditional branch instruction. */
9122 bfd_boolean overflow = FALSE;
9123 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9124 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9125 bfd_signed_vma reloc_signed_max = 0xffffe;
9126 bfd_signed_vma reloc_signed_min = -0x100000;
9127 bfd_signed_vma signed_check;
9129 /* Need to refetch the addend, reconstruct the top three bits,
9130 and squish the two 11 bit pieces together. */
9131 if (globals->use_rel)
9133 bfd_vma S = (upper_insn & 0x0400) >> 10;
9134 bfd_vma upper = (upper_insn & 0x003f);
9135 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
9136 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
9137 bfd_vma lower = (lower_insn & 0x07ff);
9142 upper -= 0x0100; /* Sign extend. */
9144 addend = (upper << 12) | (lower << 1);
9145 signed_addend = addend;
9148 /* Handle calls via the PLT. */
9149 if (plt_offset != (bfd_vma) -1)
9151 value = (splt->output_section->vma
9152 + splt->output_offset
9154 /* Target the Thumb stub before the ARM PLT entry. */
9155 value -= PLT_THUMB_STUB_SIZE;
9156 *unresolved_reloc_p = FALSE;
9159 /* ??? Should handle interworking? GCC might someday try to
9160 use this for tail calls. */
9162 relocation = value + signed_addend;
9163 relocation -= (input_section->output_section->vma
9164 + input_section->output_offset
9166 signed_check = (bfd_signed_vma) relocation;
9168 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9171 /* Put RELOCATION back into the insn. */
9173 bfd_vma S = (relocation & 0x00100000) >> 20;
9174 bfd_vma J2 = (relocation & 0x00080000) >> 19;
9175 bfd_vma J1 = (relocation & 0x00040000) >> 18;
9176 bfd_vma hi = (relocation & 0x0003f000) >> 12;
9177 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
9179 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
9180 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9183 /* Put the relocated value back in the object file: */
9184 bfd_put_16 (input_bfd, upper_insn, hit_data);
9185 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9187 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9190 case R_ARM_THM_JUMP11:
9191 case R_ARM_THM_JUMP8:
9192 case R_ARM_THM_JUMP6:
9193 /* Thumb B (branch) instruction. */
9195 bfd_signed_vma relocation;
9196 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9197 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9198 bfd_signed_vma signed_check;
9200 /* CBZ cannot jump backward. */
9201 if (r_type == R_ARM_THM_JUMP6)
9202 reloc_signed_min = 0;
9204 if (globals->use_rel)
9206 /* Need to refetch addend. */
9207 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9208 if (addend & ((howto->src_mask + 1) >> 1))
9211 signed_addend &= ~ howto->src_mask;
9212 signed_addend |= addend;
9215 signed_addend = addend;
9216 /* The value in the insn has been right shifted. We need to
9217 undo this, so that we can perform the address calculation
9218 in terms of bytes. */
9219 signed_addend <<= howto->rightshift;
9221 relocation = value + signed_addend;
9223 relocation -= (input_section->output_section->vma
9224 + input_section->output_offset
9227 relocation >>= howto->rightshift;
9228 signed_check = relocation;
9230 if (r_type == R_ARM_THM_JUMP6)
9231 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9233 relocation &= howto->dst_mask;
9234 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9236 bfd_put_16 (input_bfd, relocation, hit_data);
9238 /* Assumes two's complement. */
9239 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9240 return bfd_reloc_overflow;
9242 return bfd_reloc_ok;
9245 case R_ARM_ALU_PCREL7_0:
9246 case R_ARM_ALU_PCREL15_8:
9247 case R_ARM_ALU_PCREL23_15:
9252 insn = bfd_get_32 (input_bfd, hit_data);
9253 if (globals->use_rel)
9255 /* Extract the addend. */
9256 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9257 signed_addend = addend;
9259 relocation = value + signed_addend;
9261 relocation -= (input_section->output_section->vma
9262 + input_section->output_offset
9264 insn = (insn & ~0xfff)
9265 | ((howto->bitpos << 7) & 0xf00)
9266 | ((relocation >> howto->bitpos) & 0xff);
9267 bfd_put_32 (input_bfd, insn, hit_data);
9269 return bfd_reloc_ok;
9271 case R_ARM_GNU_VTINHERIT:
9272 case R_ARM_GNU_VTENTRY:
9273 return bfd_reloc_ok;
9275 case R_ARM_GOTOFF32:
9276 /* Relocation is relative to the start of the
9277 global offset table. */
9279 BFD_ASSERT (sgot != NULL);
9281 return bfd_reloc_notsupported;
9283 /* If we are addressing a Thumb function, we need to adjust the
9284 address by one, so that attempts to call the function pointer will
9285 correctly interpret it as Thumb code. */
9286 if (branch_type == ST_BRANCH_TO_THUMB)
9289 /* Note that sgot->output_offset is not involved in this
9290 calculation. We always want the start of .got. If we
9291 define _GLOBAL_OFFSET_TABLE in a different way, as is
9292 permitted by the ABI, we might have to change this
9294 value -= sgot->output_section->vma;
9295 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9296 contents, rel->r_offset, value,
9300 /* Use global offset table as symbol value. */
9301 BFD_ASSERT (sgot != NULL);
9304 return bfd_reloc_notsupported;
9306 *unresolved_reloc_p = FALSE;
9307 value = sgot->output_section->vma;
9308 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9309 contents, rel->r_offset, value,
9313 case R_ARM_GOT_PREL:
9314 /* Relocation is to the entry for this symbol in the
9315 global offset table. */
9317 return bfd_reloc_notsupported;
9319 if (dynreloc_st_type == STT_GNU_IFUNC
9320 && plt_offset != (bfd_vma) -1
9321 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
9323 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9324 symbol, and the relocation resolves directly to the runtime
9325 target rather than to the .iplt entry. This means that any
9326 .got entry would be the same value as the .igot.plt entry,
9327 so there's no point creating both. */
9328 sgot = globals->root.igotplt;
9329 value = sgot->output_offset + gotplt_offset;
9335 off = h->got.offset;
9336 BFD_ASSERT (off != (bfd_vma) -1);
9339 /* We have already processed one GOT relocation against
9342 if (globals->root.dynamic_sections_created
9343 && !SYMBOL_REFERENCES_LOCAL (info, h))
9344 *unresolved_reloc_p = FALSE;
9348 Elf_Internal_Rela outrel;
9350 if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
9352 /* If the symbol doesn't resolve locally in a static
9353 object, we have an undefined reference. If the
9354 symbol doesn't resolve locally in a dynamic object,
9355 it should be resolved by the dynamic linker. */
9356 if (globals->root.dynamic_sections_created)
9358 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
9359 *unresolved_reloc_p = FALSE;
9363 outrel.r_addend = 0;
9367 if (dynreloc_st_type == STT_GNU_IFUNC)
9368 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9369 else if (info->shared &&
9370 (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9371 || h->root.type != bfd_link_hash_undefweak))
9372 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9375 outrel.r_addend = dynreloc_value;
9378 /* The GOT entry is initialized to zero by default.
9379 See if we should install a different value. */
9380 if (outrel.r_addend != 0
9381 && (outrel.r_info == 0 || globals->use_rel))
9383 bfd_put_32 (output_bfd, outrel.r_addend,
9384 sgot->contents + off);
9385 outrel.r_addend = 0;
9388 if (outrel.r_info != 0)
9390 outrel.r_offset = (sgot->output_section->vma
9391 + sgot->output_offset
9393 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9397 value = sgot->output_offset + off;
9403 BFD_ASSERT (local_got_offsets != NULL &&
9404 local_got_offsets[r_symndx] != (bfd_vma) -1);
9406 off = local_got_offsets[r_symndx];
9408 /* The offset must always be a multiple of 4. We use the
9409 least significant bit to record whether we have already
9410 generated the necessary reloc. */
9415 if (globals->use_rel)
9416 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
9418 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
9420 Elf_Internal_Rela outrel;
9422 outrel.r_addend = addend + dynreloc_value;
9423 outrel.r_offset = (sgot->output_section->vma
9424 + sgot->output_offset
9426 if (dynreloc_st_type == STT_GNU_IFUNC)
9427 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9429 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9430 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9433 local_got_offsets[r_symndx] |= 1;
9436 value = sgot->output_offset + off;
9438 if (r_type != R_ARM_GOT32)
9439 value += sgot->output_section->vma;
9441 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9442 contents, rel->r_offset, value,
9445 case R_ARM_TLS_LDO32:
9446 value = value - dtpoff_base (info);
9448 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9449 contents, rel->r_offset, value,
9452 case R_ARM_TLS_LDM32:
9459 off = globals->tls_ldm_got.offset;
9465 /* If we don't know the module number, create a relocation
9469 Elf_Internal_Rela outrel;
9471 if (srelgot == NULL)
9474 outrel.r_addend = 0;
9475 outrel.r_offset = (sgot->output_section->vma
9476 + sgot->output_offset + off);
9477 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
9479 if (globals->use_rel)
9480 bfd_put_32 (output_bfd, outrel.r_addend,
9481 sgot->contents + off);
9483 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9486 bfd_put_32 (output_bfd, 1, sgot->contents + off);
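/* As with the other GOT offsets, the low bit records that this
   entry (and any relocation for it) has now been emitted.  */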
9488 globals->tls_ldm_got.offset |= 1;
9491 value = sgot->output_section->vma + sgot->output_offset + off
9492 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
9494 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9495 contents, rel->r_offset, value,
9499 case R_ARM_TLS_CALL:
9500 case R_ARM_THM_TLS_CALL:
9501 case R_ARM_TLS_GD32:
9502 case R_ARM_TLS_IE32:
9503 case R_ARM_TLS_GOTDESC:
9504 case R_ARM_TLS_DESCSEQ:
9505 case R_ARM_THM_TLS_DESCSEQ:
9507 bfd_vma off, offplt;
9511 BFD_ASSERT (sgot != NULL);
9516 dyn = globals->root.dynamic_sections_created;
9517 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
9519 || !SYMBOL_REFERENCES_LOCAL (info, h)))
9521 *unresolved_reloc_p = FALSE;
9524 off = h->got.offset;
9525 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
9526 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
9530 BFD_ASSERT (local_got_offsets != NULL);
9531 off = local_got_offsets[r_symndx];
9532 offplt = local_tlsdesc_gotents[r_symndx];
9533 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
9536 /* Linker relaxation happens from one of the
9537 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
9538 if (ELF32_R_TYPE(rel->r_info) != r_type)
9539 tls_type = GOT_TLS_IE;
9541 BFD_ASSERT (tls_type != GOT_UNKNOWN);
9547 bfd_boolean need_relocs = FALSE;
9548 Elf_Internal_Rela outrel;
9551 /* The GOT entries have not been initialized yet. Do it
9552 now, and emit any relocations. If both an IE GOT and a
9553 GD GOT are necessary, we emit the GD first. */
9555 if ((info->shared || indx != 0)
9557 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9558 || h->root.type != bfd_link_hash_undefweak))
9561 BFD_ASSERT (srelgot != NULL);
9564 if (tls_type & GOT_TLS_GDESC)
9568 /* We should have relaxed, unless this is an undefined
9570 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
9572 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
9573 <= globals->root.sgotplt->size);
9575 outrel.r_addend = 0;
9576 outrel.r_offset = (globals->root.sgotplt->output_section->vma
9577 + globals->root.sgotplt->output_offset
9579 + globals->sgotplt_jump_table_size);
9581 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
9582 sreloc = globals->root.srelplt;
9583 loc = sreloc->contents;
9584 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
9585 BFD_ASSERT (loc + RELOC_SIZE (globals)
9586 <= sreloc->contents + sreloc->size);
9588 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
9590 /* For globals, the first word in the relocation gets
9591 the relocation index and the top bit set, or zero,
9592 if we're binding now. For locals, it gets the
9593 symbol's offset in the tls section. */
9594 bfd_put_32 (output_bfd,
9595 !h ? value - elf_hash_table (info)->tls_sec->vma
9596 : info->flags & DF_BIND_NOW ? 0
9597 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
9598 globals->root.sgotplt->contents + offplt
9599 + globals->sgotplt_jump_table_size);
9601 /* Second word in the relocation is always zero. */
9602 bfd_put_32 (output_bfd, 0,
9603 globals->root.sgotplt->contents + offplt
9604 + globals->sgotplt_jump_table_size + 4);
9606 if (tls_type & GOT_TLS_GD)
9610 outrel.r_addend = 0;
9611 outrel.r_offset = (sgot->output_section->vma
9612 + sgot->output_offset
9614 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
9616 if (globals->use_rel)
9617 bfd_put_32 (output_bfd, outrel.r_addend,
9618 sgot->contents + cur_off);
9620 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9623 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9624 sgot->contents + cur_off + 4);
9627 outrel.r_addend = 0;
9628 outrel.r_info = ELF32_R_INFO (indx,
9629 R_ARM_TLS_DTPOFF32);
9630 outrel.r_offset += 4;
9632 if (globals->use_rel)
9633 bfd_put_32 (output_bfd, outrel.r_addend,
9634 sgot->contents + cur_off + 4);
9636 elf32_arm_add_dynreloc (output_bfd, info,
9642 /* If we are not emitting relocations for a
9643 general dynamic reference, then we must be in a
9644 static link or an executable link with the
9645 symbol binding locally. Mark it as belonging
9646 to module 1, the executable. */
9647 bfd_put_32 (output_bfd, 1,
9648 sgot->contents + cur_off);
9649 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9650 sgot->contents + cur_off + 4);
9656 if (tls_type & GOT_TLS_IE)
9661 outrel.r_addend = value - dtpoff_base (info);
9663 outrel.r_addend = 0;
9664 outrel.r_offset = (sgot->output_section->vma
9665 + sgot->output_offset
9667 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
9669 if (globals->use_rel)
9670 bfd_put_32 (output_bfd, outrel.r_addend,
9671 sgot->contents + cur_off);
9673 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9676 bfd_put_32 (output_bfd, tpoff (info, value),
9677 sgot->contents + cur_off);
9684 local_got_offsets[r_symndx] |= 1;
9687 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
9689 else if (tls_type & GOT_TLS_GDESC)
9692 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
9693 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
9695 bfd_signed_vma offset;
9696 /* TLS stubs are arm mode. The original symbol is a
9697 data object, so branch_type is bogus. */
9698 branch_type = ST_BRANCH_TO_ARM;
9699 enum elf32_arm_stub_type stub_type
9700 = arm_type_of_stub (info, input_section, rel,
9701 st_type, &branch_type,
9702 (struct elf32_arm_link_hash_entry *)h,
9703 globals->tls_trampoline, globals->root.splt,
9704 input_bfd, sym_name);
9706 if (stub_type != arm_stub_none)
9708 struct elf32_arm_stub_hash_entry *stub_entry
9709 = elf32_arm_get_stub_entry
9710 (input_section, globals->root.splt, 0, rel,
9711 globals, stub_type);
9712 offset = (stub_entry->stub_offset
9713 + stub_entry->stub_sec->output_offset
9714 + stub_entry->stub_sec->output_section->vma);
9717 offset = (globals->root.splt->output_section->vma
9718 + globals->root.splt->output_offset
9719 + globals->tls_trampoline);
9721 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
9725 offset -= (input_section->output_section->vma
9726 + input_section->output_offset
9727 + rel->r_offset + 8);
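/* Rewrite the ARM-mode TLS call as a BL (or a BLX when available)
   to the trampoline or stub computed above, with the word offset
   held in the low 24 bits of the instruction.  */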
9731 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
9735 /* Thumb blx encodes the offset in a complicated
9737 unsigned upper_insn, lower_insn;
9740 offset -= (input_section->output_section->vma
9741 + input_section->output_offset
9742 + rel->r_offset + 4);
9744 if (stub_type != arm_stub_none
9745 && arm_stub_is_thumb (stub_type))
9747 lower_insn = 0xd000;
9751 lower_insn = 0xc000;
9752 /* Round up the offset to a word boundary. */
9753 offset = (offset + 2) & ~2;
9757 upper_insn = (0xf000
9758 | ((offset >> 12) & 0x3ff)
9760 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
9761 | (((!((offset >> 22) & 1)) ^ neg) << 11)
9762 | ((offset >> 1) & 0x7ff);
9763 bfd_put_16 (input_bfd, upper_insn, hit_data);
9764 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9765 return bfd_reloc_ok;
9768 /* These relocations need special care: besides the fact that
9769 they point somewhere in .gotplt, the addend must be
9770 adjusted accordingly depending on the type of instruction
9772 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
9774 unsigned long data, insn;
9777 data = bfd_get_32 (input_bfd, hit_data);
9783 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
9784 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9786 | bfd_get_16 (input_bfd,
9787 contents + rel->r_offset - data + 2);
9788 if ((insn & 0xf800c000) == 0xf000c000)
9791 else if ((insn & 0xffffff00) == 0x4400)
9796 (*_bfd_error_handler)
9797 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
9798 input_bfd, input_section,
9799 (unsigned long)rel->r_offset, insn);
9800 return bfd_reloc_notsupported;
9805 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
9810 case 0xfa: /* blx */
9814 case 0xe0: /* add */
9819 (*_bfd_error_handler)
9820 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
9821 input_bfd, input_section,
9822 (unsigned long)rel->r_offset, insn);
9823 return bfd_reloc_notsupported;
9827 value += ((globals->root.sgotplt->output_section->vma
9828 + globals->root.sgotplt->output_offset + off)
9829 - (input_section->output_section->vma
9830 + input_section->output_offset
9832 + globals->sgotplt_jump_table_size);
9835 value = ((globals->root.sgot->output_section->vma
9836 + globals->root.sgot->output_offset + off)
9837 - (input_section->output_section->vma
9838 + input_section->output_offset + rel->r_offset));
9840 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9841 contents, rel->r_offset, value,
9845 case R_ARM_TLS_LE32:
9846 if (info->shared && !info->pie)
9848 (*_bfd_error_handler)
9849 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
9850 input_bfd, input_section,
9851 (long) rel->r_offset, howto->name);
9852 return bfd_reloc_notsupported;
9855 value = tpoff (info, value);
9857 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9858 contents, rel->r_offset, value,
9862 if (globals->fix_v4bx)
9864 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9866 /* Ensure that we have a BX instruction. */
9867 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
9869 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
9871 /* Branch to veneer. */
9873 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
9874 glue_addr -= input_section->output_section->vma
9875 + input_section->output_offset
9876 + rel->r_offset + 8;
9877 insn = (insn & 0xf0000000) | 0x0a000000
9878 | ((glue_addr >> 2) & 0x00ffffff);
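/* The BX has become a conditional branch (B) to the veneer: the
   original condition code is kept in the top nibble and the word
   offset occupies the low 24 bits.  */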
9882 /* Preserve Rm (lowest four bits) and the condition code
9883 (highest four bits). Other bits encode MOV PC,Rm. */
9884 insn = (insn & 0xf000000f) | 0x01a0f000;
9887 bfd_put_32 (input_bfd, insn, hit_data);
9889 return bfd_reloc_ok;
9891 case R_ARM_MOVW_ABS_NC:
9892 case R_ARM_MOVT_ABS:
9893 case R_ARM_MOVW_PREL_NC:
9894 case R_ARM_MOVT_PREL:
9895 /* Until we properly support segment-base-relative addressing,
9896 we assume the segment base to be zero, as for the group relocations.
9897 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
9898 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
9899 case R_ARM_MOVW_BREL_NC:
9900 case R_ARM_MOVW_BREL:
9901 case R_ARM_MOVT_BREL:
9903 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9905 if (globals->use_rel)
9907 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9908 signed_addend = (addend ^ 0x8000) - 0x8000;
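/* The MOVW/MOVT immediate is reassembled from imm4 (bits 16-19) and
   imm12 (bits 0-11), then sign-extended to 16 bits by the
   XOR/subtract idiom above.  */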
9911 value += signed_addend;
9913 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
9914 value -= (input_section->output_section->vma
9915 + input_section->output_offset + rel->r_offset);
9917 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
9918 return bfd_reloc_overflow;
9920 if (branch_type == ST_BRANCH_TO_THUMB)
9923 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
9924 || r_type == R_ARM_MOVT_BREL)
9928 insn |= value & 0xfff;
9929 insn |= (value & 0xf000) << 4;
9930 bfd_put_32 (input_bfd, insn, hit_data);
9932 return bfd_reloc_ok;
9934 case R_ARM_THM_MOVW_ABS_NC:
9935 case R_ARM_THM_MOVT_ABS:
9936 case R_ARM_THM_MOVW_PREL_NC:
9937 case R_ARM_THM_MOVT_PREL:
9938 /* Until we properly support segment-base-relative addressing,
9939 we assume the segment base to be zero, as for the above relocations.
9940 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
9941 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
9942 as R_ARM_THM_MOVT_ABS. */
9943 case R_ARM_THM_MOVW_BREL_NC:
9944 case R_ARM_THM_MOVW_BREL:
9945 case R_ARM_THM_MOVT_BREL:
9949 insn = bfd_get_16 (input_bfd, hit_data) << 16;
9950 insn |= bfd_get_16 (input_bfd, hit_data + 2);
9952 if (globals->use_rel)
9954 addend = ((insn >> 4) & 0xf000)
9955 | ((insn >> 15) & 0x0800)
9956 | ((insn >> 4) & 0x0700)
9958 signed_addend = (addend ^ 0x8000) - 0x8000;
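/* Likewise for Thumb: the immediate is scattered across the imm4, i,
   imm3 and imm8 fields of the 32-bit MOVW/MOVT encoding and is
   sign-extended to 16 bits once reassembled.  */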
9961 value += signed_addend;
9963 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
9964 value -= (input_section->output_section->vma
9965 + input_section->output_offset + rel->r_offset);
9967 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
9968 return bfd_reloc_overflow;
9970 if (branch_type == ST_BRANCH_TO_THUMB)
9973 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
9974 || r_type == R_ARM_THM_MOVT_BREL)
9978 insn |= (value & 0xf000) << 4;
9979 insn |= (value & 0x0800) << 15;
9980 insn |= (value & 0x0700) << 4;
9981 insn |= (value & 0x00ff);
9983 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9984 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9986 return bfd_reloc_ok;
9988 case R_ARM_ALU_PC_G0_NC:
9989 case R_ARM_ALU_PC_G1_NC:
9990 case R_ARM_ALU_PC_G0:
9991 case R_ARM_ALU_PC_G1:
9992 case R_ARM_ALU_PC_G2:
9993 case R_ARM_ALU_SB_G0_NC:
9994 case R_ARM_ALU_SB_G1_NC:
9995 case R_ARM_ALU_SB_G0:
9996 case R_ARM_ALU_SB_G1:
9997 case R_ARM_ALU_SB_G2:
9999 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10000 bfd_vma pc = input_section->output_section->vma
10001 + input_section->output_offset + rel->r_offset;
10002 /* sb is the origin of the *segment* containing the symbol. */
10003 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10006 bfd_signed_vma signed_value;
10009 /* Determine which group of bits to select. */
10012 case R_ARM_ALU_PC_G0_NC:
10013 case R_ARM_ALU_PC_G0:
10014 case R_ARM_ALU_SB_G0_NC:
10015 case R_ARM_ALU_SB_G0:
10019 case R_ARM_ALU_PC_G1_NC:
10020 case R_ARM_ALU_PC_G1:
10021 case R_ARM_ALU_SB_G1_NC:
10022 case R_ARM_ALU_SB_G1:
10026 case R_ARM_ALU_PC_G2:
10027 case R_ARM_ALU_SB_G2:
10035 /* If REL, extract the addend from the insn. If RELA, it will
10036 have already been fetched for us. */
10037 if (globals->use_rel)
10040 bfd_vma constant = insn & 0xff;
10041 bfd_vma rotation = (insn & 0xf00) >> 8;
10044 signed_addend = constant;
10047 /* Compensate for the fact that in the instruction, the
10048 rotation is stored in multiples of 2 bits. */
10051 /* Rotate "constant" right by "rotation" bits. */
10052 signed_addend = (constant >> rotation) |
10053 (constant << (8 * sizeof (bfd_vma) - rotation));
10056 /* Determine if the instruction is an ADD or a SUB.
10057 (For REL, this determines the sign of the addend.) */
10058 negative = identify_add_or_sub (insn);
10061 (*_bfd_error_handler)
10062 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
10063 input_bfd, input_section,
10064 (long) rel->r_offset, howto->name);
10065 return bfd_reloc_overflow;
10068 signed_addend *= negative;
10071 /* Compute the value (X) to go in the place. */
10072 if (r_type == R_ARM_ALU_PC_G0_NC
10073 || r_type == R_ARM_ALU_PC_G1_NC
10074 || r_type == R_ARM_ALU_PC_G0
10075 || r_type == R_ARM_ALU_PC_G1
10076 || r_type == R_ARM_ALU_PC_G2)
10078 signed_value = value - pc + signed_addend;
10080 /* Section base relative. */
10081 signed_value = value - sb + signed_addend;
10083 /* If the target symbol is a Thumb function, then set the
10084 Thumb bit in the address. */
10085 if (branch_type == ST_BRANCH_TO_THUMB)
10088 /* Calculate the value of the relevant G_n, in encoded
10089 constant-with-rotation format. */
10090 g_n = calculate_group_reloc_mask (abs (signed_value), group,
10093 /* Check for overflow if required. */
10094 if ((r_type == R_ARM_ALU_PC_G0
10095 || r_type == R_ARM_ALU_PC_G1
10096 || r_type == R_ARM_ALU_PC_G2
10097 || r_type == R_ARM_ALU_SB_G0
10098 || r_type == R_ARM_ALU_SB_G1
10099 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
10101 (*_bfd_error_handler)
10102 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10103 input_bfd, input_section,
10104 (long) rel->r_offset, abs (signed_value), howto->name);
10105 return bfd_reloc_overflow;
10108 /* Mask out the value and the ADD/SUB part of the opcode; take care
10109 not to destroy the S bit. */
10110 insn &= 0xff1ff000;
10112 /* Set the opcode according to whether the value to go in the
10113 place is negative. */
10114 if (signed_value < 0)
10119 /* Encode the offset. */
10122 bfd_put_32 (input_bfd, insn, hit_data);
10124 return bfd_reloc_ok;
10126 case R_ARM_LDR_PC_G0:
10127 case R_ARM_LDR_PC_G1:
10128 case R_ARM_LDR_PC_G2:
10129 case R_ARM_LDR_SB_G0:
10130 case R_ARM_LDR_SB_G1:
10131 case R_ARM_LDR_SB_G2:
10133 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10134 bfd_vma pc = input_section->output_section->vma
10135 + input_section->output_offset + rel->r_offset;
10136 /* sb is the origin of the *segment* containing the symbol. */
10137 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10139 bfd_signed_vma signed_value;
10142 /* Determine which groups of bits to calculate. */
10145 case R_ARM_LDR_PC_G0:
10146 case R_ARM_LDR_SB_G0:
10150 case R_ARM_LDR_PC_G1:
10151 case R_ARM_LDR_SB_G1:
10155 case R_ARM_LDR_PC_G2:
10156 case R_ARM_LDR_SB_G2:
10164 /* If REL, extract the addend from the insn. If RELA, it will
10165 have already been fetched for us. */
10166 if (globals->use_rel)
10168 int negative = (insn & (1 << 23)) ? 1 : -1;
10169 signed_addend = negative * (insn & 0xfff);
10172 /* Compute the value (X) to go in the place. */
10173 if (r_type == R_ARM_LDR_PC_G0
10174 || r_type == R_ARM_LDR_PC_G1
10175 || r_type == R_ARM_LDR_PC_G2)
10177 signed_value = value - pc + signed_addend;
10179 /* Section base relative. */
10180 signed_value = value - sb + signed_addend;
10182 /* Calculate the value of the relevant G_{n-1} to obtain
10183 the residual at that stage. */
10184 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10186 /* Check for overflow. */
10187 if (residual >= 0x1000)
10189 (*_bfd_error_handler)
10190 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10191 input_bfd, input_section,
10192 (long) rel->r_offset, abs (signed_value), howto->name);
10193 return bfd_reloc_overflow;
10196 /* Mask out the value and U bit. */
10197 insn &= 0xff7ff000;
10199 /* Set the U bit if the value to go in the place is non-negative. */
10200 if (signed_value >= 0)
10203 /* Encode the offset. */
10206 bfd_put_32 (input_bfd, insn, hit_data);
10208 return bfd_reloc_ok;
10210 case R_ARM_LDRS_PC_G0:
10211 case R_ARM_LDRS_PC_G1:
10212 case R_ARM_LDRS_PC_G2:
10213 case R_ARM_LDRS_SB_G0:
10214 case R_ARM_LDRS_SB_G1:
10215 case R_ARM_LDRS_SB_G2:
10217 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10218 bfd_vma pc = input_section->output_section->vma
10219 + input_section->output_offset + rel->r_offset;
10220 /* sb is the origin of the *segment* containing the symbol. */
10221 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10223 bfd_signed_vma signed_value;
10226 /* Determine which groups of bits to calculate. */
10229 case R_ARM_LDRS_PC_G0:
10230 case R_ARM_LDRS_SB_G0:
10234 case R_ARM_LDRS_PC_G1:
10235 case R_ARM_LDRS_SB_G1:
10239 case R_ARM_LDRS_PC_G2:
10240 case R_ARM_LDRS_SB_G2:
10248 /* If REL, extract the addend from the insn. If RELA, it will
10249 have already been fetched for us. */
10250 if (globals->use_rel)
10252 int negative = (insn & (1 << 23)) ? 1 : -1;
10253 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
10256 /* Compute the value (X) to go in the place. */
10257 if (r_type == R_ARM_LDRS_PC_G0
10258 || r_type == R_ARM_LDRS_PC_G1
10259 || r_type == R_ARM_LDRS_PC_G2)
10261 signed_value = value - pc + signed_addend;
10263 /* Section base relative. */
10264 signed_value = value - sb + signed_addend;
10266 /* Calculate the value of the relevant G_{n-1} to obtain
10267 the residual at that stage. */
10268 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10270 /* Check for overflow. */
10271 if (residual >= 0x100)
10273 (*_bfd_error_handler)
10274 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10275 input_bfd, input_section,
10276 (long) rel->r_offset, abs (signed_value), howto->name);
10277 return bfd_reloc_overflow;
10280 /* Mask out the value and U bit. */
10281 insn &= 0xff7ff0f0;
10283 /* Set the U bit if the value to go in the place is non-negative. */
10284 if (signed_value >= 0)
10287 /* Encode the offset. */
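/* The halfword/signed-byte addressing mode splits its 8-bit offset
   into two nibbles: imm4H goes in bits 8-11 and imm4L in bits 0-3.  */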
10288 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
10290 bfd_put_32 (input_bfd, insn, hit_data);
10292 return bfd_reloc_ok;
10294 case R_ARM_LDC_PC_G0:
10295 case R_ARM_LDC_PC_G1:
10296 case R_ARM_LDC_PC_G2:
10297 case R_ARM_LDC_SB_G0:
10298 case R_ARM_LDC_SB_G1:
10299 case R_ARM_LDC_SB_G2:
10301 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10302 bfd_vma pc = input_section->output_section->vma
10303 + input_section->output_offset + rel->r_offset;
10304 /* sb is the origin of the *segment* containing the symbol. */
10305 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10307 bfd_signed_vma signed_value;
10310 /* Determine which groups of bits to calculate. */
10313 case R_ARM_LDC_PC_G0:
10314 case R_ARM_LDC_SB_G0:
10318 case R_ARM_LDC_PC_G1:
10319 case R_ARM_LDC_SB_G1:
10323 case R_ARM_LDC_PC_G2:
10324 case R_ARM_LDC_SB_G2:
10332 /* If REL, extract the addend from the insn. If RELA, it will
10333 have already been fetched for us. */
10334 if (globals->use_rel)
10336 int negative = (insn & (1 << 23)) ? 1 : -1;
10337 signed_addend = negative * ((insn & 0xff) << 2);
10340 /* Compute the value (X) to go in the place. */
10341 if (r_type == R_ARM_LDC_PC_G0
10342 || r_type == R_ARM_LDC_PC_G1
10343 || r_type == R_ARM_LDC_PC_G2)
10345 signed_value = value - pc + signed_addend;
10347 /* Section base relative. */
10348 signed_value = value - sb + signed_addend;
10350 /* Calculate the value of the relevant G_{n-1} to obtain
10351 the residual at that stage. */
10352 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10354 /* Check for overflow. (The absolute value to go in the place must be
10355 divisible by four and, after having been divided by four, must
10356 fit in eight bits.) */
10357 if ((residual & 0x3) != 0 || residual >= 0x400)
10359 (*_bfd_error_handler)
10360 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10361 input_bfd, input_section,
10362 (long) rel->r_offset, abs (signed_value), howto->name);
10363 return bfd_reloc_overflow;
10366 /* Mask out the value and U bit. */
10367 insn &= 0xff7fff00;
10369 /* Set the U bit if the value to go in the place is non-negative. */
10370 if (signed_value >= 0)
10373 /* Encode the offset. */
10374 insn |= residual >> 2;
10376 bfd_put_32 (input_bfd, insn, hit_data);
10378 return bfd_reloc_ok;
10381 return bfd_reloc_notsupported;
10385 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
10387 arm_add_to_rel (bfd * abfd,
10388 bfd_byte * address,
10389 reloc_howto_type * howto,
10390 bfd_signed_vma increment)
10392 bfd_signed_vma addend;
10394 if (howto->type == R_ARM_THM_CALL
10395 || howto->type == R_ARM_THM_JUMP24)
10397 int upper_insn, lower_insn;
10400 upper_insn = bfd_get_16 (abfd, address);
10401 lower_insn = bfd_get_16 (abfd, address + 2);
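/* A Thumb BL/BLX pair carries eleven bits of the halfword offset in
   each 16-bit instruction; reconstruct the addend, add the increment
   and split the result back into the two halves.  */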
10402 upper = upper_insn & 0x7ff;
10403 lower = lower_insn & 0x7ff;
10405 addend = (upper << 12) | (lower << 1);
10406 addend += increment;
10409 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
10410 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
10412 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
10413 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
10419 contents = bfd_get_32 (abfd, address);
10421 /* Get the (signed) value from the instruction. */
10422 addend = contents & howto->src_mask;
10423 if (addend & ((howto->src_mask + 1) >> 1))
10425 bfd_signed_vma mask;
10428 mask &= ~ howto->src_mask;
10432 /* Add in the increment (which is a byte value). */
10433 switch (howto->type)
10436 addend += increment;
10443 addend <<= howto->size;
10444 addend += increment;
10446 /* Should we check for overflow here ? */
10448 /* Drop any undesired bits. */
10449 addend >>= howto->rightshift;
10453 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
10455 bfd_put_32 (abfd, contents, address);
10459 #define IS_ARM_TLS_RELOC(R_TYPE) \
10460 ((R_TYPE) == R_ARM_TLS_GD32 \
10461 || (R_TYPE) == R_ARM_TLS_LDO32 \
10462 || (R_TYPE) == R_ARM_TLS_LDM32 \
10463 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
10464 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
10465 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
10466 || (R_TYPE) == R_ARM_TLS_LE32 \
10467 || (R_TYPE) == R_ARM_TLS_IE32 \
10468 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
10470 /* Specific set of relocations for the GNU TLS dialect. */
10471 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
10472 ((R_TYPE) == R_ARM_TLS_GOTDESC \
10473 || (R_TYPE) == R_ARM_TLS_CALL \
10474 || (R_TYPE) == R_ARM_THM_TLS_CALL \
10475 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
10476 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
10478 /* Relocate an ARM ELF section. */
10481 elf32_arm_relocate_section (bfd * output_bfd,
10482 struct bfd_link_info * info,
10484 asection * input_section,
10485 bfd_byte * contents,
10486 Elf_Internal_Rela * relocs,
10487 Elf_Internal_Sym * local_syms,
10488 asection ** local_sections)
10490 Elf_Internal_Shdr *symtab_hdr;
10491 struct elf_link_hash_entry **sym_hashes;
10492 Elf_Internal_Rela *rel;
10493 Elf_Internal_Rela *relend;
10495 struct elf32_arm_link_hash_table * globals;
10497 globals = elf32_arm_hash_table (info);
10498 if (globals == NULL)
10501 symtab_hdr = & elf_symtab_hdr (input_bfd);
10502 sym_hashes = elf_sym_hashes (input_bfd);
10505 relend = relocs + input_section->reloc_count;
10506 for (; rel < relend; rel++)
10509 reloc_howto_type * howto;
10510 unsigned long r_symndx;
10511 Elf_Internal_Sym * sym;
10513 struct elf_link_hash_entry * h;
10514 bfd_vma relocation;
10515 bfd_reloc_status_type r;
10518 bfd_boolean unresolved_reloc = FALSE;
10519 char *error_message = NULL;
10521 r_symndx = ELF32_R_SYM (rel->r_info);
10522 r_type = ELF32_R_TYPE (rel->r_info);
10523 r_type = arm_real_reloc_type (globals, r_type);
10525 if ( r_type == R_ARM_GNU_VTENTRY
10526 || r_type == R_ARM_GNU_VTINHERIT)
10529 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
10530 howto = bfd_reloc.howto;
10536 if (r_symndx < symtab_hdr->sh_info)
10538 sym = local_syms + r_symndx;
10539 sym_type = ELF32_ST_TYPE (sym->st_info);
10540 sec = local_sections[r_symndx];
10542 /* An object file might have a reference to a local
10543 undefined symbol. This is a daft object file, but we
10544 should at least do something about it. V4BX & NONE
10545 relocations do not use the symbol and are explicitly
10546 allowed to use the undefined symbol, so allow those.
10547 Likewise for relocations against STN_UNDEF. */
10548 if (r_type != R_ARM_V4BX
10549 && r_type != R_ARM_NONE
10550 && r_symndx != STN_UNDEF
10551 && bfd_is_und_section (sec)
10552 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
10554 if (!info->callbacks->undefined_symbol
10555 (info, bfd_elf_string_from_elf_section
10556 (input_bfd, symtab_hdr->sh_link, sym->st_name),
10557 input_bfd, input_section,
10558 rel->r_offset, TRUE))
10562 if (globals->use_rel)
10564 relocation = (sec->output_section->vma
10565 + sec->output_offset
10567 if (!info->relocatable
10568 && (sec->flags & SEC_MERGE)
10569 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10572 bfd_vma addend, value;
10576 case R_ARM_MOVW_ABS_NC:
10577 case R_ARM_MOVT_ABS:
10578 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10579 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
10580 addend = (addend ^ 0x8000) - 0x8000;
10583 case R_ARM_THM_MOVW_ABS_NC:
10584 case R_ARM_THM_MOVT_ABS:
10585 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
10587 value |= bfd_get_16 (input_bfd,
10588 contents + rel->r_offset + 2);
10589 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
10590 | ((value & 0x04000000) >> 15);
10591 addend = (addend ^ 0x8000) - 0x8000;
10595 if (howto->rightshift
10596 || (howto->src_mask & (howto->src_mask + 1)))
10598 (*_bfd_error_handler)
10599 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
10600 input_bfd, input_section,
10601 (long) rel->r_offset, howto->name);
10605 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10607 /* Get the (signed) value from the instruction. */
10608 addend = value & howto->src_mask;
10609 if (addend & ((howto->src_mask + 1) >> 1))
10611 bfd_signed_vma mask;
10614 mask &= ~ howto->src_mask;
10622 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
10624 addend += msec->output_section->vma + msec->output_offset;
10626 /* Cases here must match those in the preceding
10627 switch statement. */
10630 case R_ARM_MOVW_ABS_NC:
10631 case R_ARM_MOVT_ABS:
10632 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
10633 | (addend & 0xfff);
10634 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10637 case R_ARM_THM_MOVW_ABS_NC:
10638 case R_ARM_THM_MOVT_ABS:
10639 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
10640 | (addend & 0xff) | ((addend & 0x0800) << 15);
10641 bfd_put_16 (input_bfd, value >> 16,
10642 contents + rel->r_offset);
10643 bfd_put_16 (input_bfd, value,
10644 contents + rel->r_offset + 2);
10648 value = (value & ~ howto->dst_mask)
10649 | (addend & howto->dst_mask);
10650 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10656 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
10660 bfd_boolean warned, ignored;
10662 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
10663 r_symndx, symtab_hdr, sym_hashes,
10664 h, sec, relocation,
10665 unresolved_reloc, warned, ignored);
10667 sym_type = h->type;
10670 if (sec != NULL && discarded_section (sec))
10671 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
10672 rel, 1, relend, howto, 0, contents);
10674 if (info->relocatable)
10676 /* This is a relocatable link. We don't have to change
10677 anything, unless the reloc is against a section symbol,
10678 in which case we have to adjust according to where the
10679 section symbol winds up in the output section. */
10680 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10682 if (globals->use_rel)
10683 arm_add_to_rel (input_bfd, contents + rel->r_offset,
10684 howto, (bfd_signed_vma) sec->output_offset);
10686 rel->r_addend += sec->output_offset;
10692 name = h->root.root.string;
10695 name = (bfd_elf_string_from_elf_section
10696 (input_bfd, symtab_hdr->sh_link, sym->st_name));
10697 if (name == NULL || *name == '\0')
10698 name = bfd_section_name (input_bfd, sec);
10701 if (r_symndx != STN_UNDEF
10702 && r_type != R_ARM_NONE
10704 || h->root.type == bfd_link_hash_defined
10705 || h->root.type == bfd_link_hash_defweak)
10706 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
10708 (*_bfd_error_handler)
10709 ((sym_type == STT_TLS
10710 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
10711 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
10714 (long) rel->r_offset,
10719 /* We call elf32_arm_final_link_relocate unless we're completely
10720 done, i.e., the relaxation produced the final output we want,
10721 and we won't let anybody mess with it. Also, we have to do
10722 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
10723 both in relaxed and non-relaxed cases. */
10724 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
10725 || (IS_ARM_TLS_GNU_RELOC (r_type)
10726 && !((h ? elf32_arm_hash_entry (h)->tls_type :
10727 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
10730 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
10731 contents, rel, h == NULL);
10732 /* This may have been marked unresolved because it came from
10733 a shared library. But we've just dealt with that. */
10734 unresolved_reloc = 0;
10737 r = bfd_reloc_continue;
10739 if (r == bfd_reloc_continue)
10740 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
10741 input_section, contents, rel,
10742 relocation, info, sec, name, sym_type,
10743 (h ? h->target_internal
10744 : ARM_SYM_BRANCH_TYPE (sym)), h,
10745 &unresolved_reloc, &error_message);
10747 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
10748 because such sections are not SEC_ALLOC and thus ld.so will
10749 not process them. */
10750 if (unresolved_reloc
10751 && !((input_section->flags & SEC_DEBUGGING) != 0
10753 && _bfd_elf_section_offset (output_bfd, info, input_section,
10754 rel->r_offset) != (bfd_vma) -1)
10756 (*_bfd_error_handler)
10757 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
10760 (long) rel->r_offset,
10762 h->root.root.string);
10766 if (r != bfd_reloc_ok)
10770 case bfd_reloc_overflow:
10771 /* If the overflowing reloc was to an undefined symbol,
10772 we have already printed one error message and there
10773 is no point complaining again. */
10775 h->root.type != bfd_link_hash_undefined)
10776 && (!((*info->callbacks->reloc_overflow)
10777 (info, (h ? &h->root : NULL), name, howto->name,
10778 (bfd_vma) 0, input_bfd, input_section,
10783 case bfd_reloc_undefined:
10784 if (!((*info->callbacks->undefined_symbol)
10785 (info, name, input_bfd, input_section,
10786 rel->r_offset, TRUE)))
10790 case bfd_reloc_outofrange:
10791 error_message = _("out of range");
10794 case bfd_reloc_notsupported:
10795 error_message = _("unsupported relocation");
10798 case bfd_reloc_dangerous:
10799 /* error_message should already be set. */
10803 error_message = _("unknown error");
10804 /* Fall through. */
10807 BFD_ASSERT (error_message != NULL);
10808 if (!((*info->callbacks->reloc_dangerous)
10809 (info, error_message, input_bfd, input_section,
10820 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
10821 adds the edit to the start of the list. (The list must be built in order of
10822 ascending TINDEX: the function's callers are primarily responsible for
10823 maintaining that condition). */
10826 add_unwind_table_edit (arm_unwind_table_edit **head,
10827 arm_unwind_table_edit **tail,
10828 arm_unwind_edit_type type,
10829 asection *linked_section,
10830 unsigned int tindex)
10832 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
10833 xmalloc (sizeof (arm_unwind_table_edit));
10835 new_edit->type = type;
10836 new_edit->linked_section = linked_section;
10837 new_edit->index = tindex;
10841 new_edit->next = NULL;
10844 (*tail)->next = new_edit;
10846 (*tail) = new_edit;
10849 (*head) = new_edit;
10853 new_edit->next = *head;
10862 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
10864 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
10866 adjust_exidx_size (asection *exidx_sec, int adjust)
10870 if (!exidx_sec->rawsize)
10871 exidx_sec->rawsize = exidx_sec->size;
10873 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
10874 out_sec = exidx_sec->output_section;
10875 /* Adjust size of output section. */
10876 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
10879 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
10881 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
10883 struct _arm_elf_section_data *exidx_arm_data;
10885 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10886 add_unwind_table_edit (
10887 &exidx_arm_data->u.exidx.unwind_edit_list,
10888 &exidx_arm_data->u.exidx.unwind_edit_tail,
10889 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
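/* A CANTUNWIND marker is one full index table entry (two 32-bit words),
   hence the 8-byte size adjustment below.  */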
10891 adjust_exidx_size (exidx_sec, 8);
10894 /* Scan .ARM.exidx tables, and create a list describing edits which should be
10895 made to those tables, such that:
10897 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
10898 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
10899 codes which have been inlined into the index).
10901 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
10903 The edits are applied when the tables are written
10904 (in elf32_arm_write_section). */
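/* For illustration (based on the entry checks below): each index table entry
   is a pair of 32-bit words, a PREL31 reference to the code it covers
   followed by either the value 1 (EXIDX_CANTUNWIND), an inlined unwind
   descriptor (bit 31 set), or a PREL31 reference into .ARM.extab.  So a run
   of entries such as

       fn1: 0x80a8b0b0   inlined unwind opcodes
       fn2: 0x80a8b0b0   identical, deleted when MERGE_EXIDX_ENTRIES
       fn3: 0x00000001   EXIDX_CANTUNWIND, kept

   would be edited down to just the fn1 and fn3 entries.  */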
10907 elf32_arm_fix_exidx_coverage (asection **text_section_order,
10908 unsigned int num_text_sections,
10909 struct bfd_link_info *info,
10910 bfd_boolean merge_exidx_entries)
10913 unsigned int last_second_word = 0, i;
10914 asection *last_exidx_sec = NULL;
10915 asection *last_text_sec = NULL;
10916 int last_unwind_type = -1;
10918 /* Walk over all EXIDX sections, and create backlinks from the corresponding text sections. */
10920 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
10924 for (sec = inp->sections; sec != NULL; sec = sec->next)
10926 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
10927 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
10929 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
10932 if (elf_sec->linked_to)
10934 Elf_Internal_Shdr *linked_hdr
10935 = &elf_section_data (elf_sec->linked_to)->this_hdr;
10936 struct _arm_elf_section_data *linked_sec_arm_data
10937 = get_arm_elf_section_data (linked_hdr->bfd_section);
10939 if (linked_sec_arm_data == NULL)
10942 /* Link this .ARM.exidx section back from the text section it describes. */
10944 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
10949 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
10950 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
10951 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
10953 for (i = 0; i < num_text_sections; i++)
10955 asection *sec = text_section_order[i];
10956 asection *exidx_sec;
10957 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
10958 struct _arm_elf_section_data *exidx_arm_data;
10959 bfd_byte *contents = NULL;
10960 int deleted_exidx_bytes = 0;
10962 arm_unwind_table_edit *unwind_edit_head = NULL;
10963 arm_unwind_table_edit *unwind_edit_tail = NULL;
10964 Elf_Internal_Shdr *hdr;
10967 if (arm_data == NULL)
10970 exidx_sec = arm_data->u.text.arm_exidx_sec;
10971 if (exidx_sec == NULL)
10973 /* Section has no unwind data. */
10974 if (last_unwind_type == 0 || !last_exidx_sec)
10977 /* Ignore zero sized sections. */
10978 if (sec->size == 0)
10981 insert_cantunwind_after (last_text_sec, last_exidx_sec);
10982 last_unwind_type = 0;
10986 /* Skip /DISCARD/ sections. */
10987 if (bfd_is_abs_section (exidx_sec->output_section))
10990 hdr = &elf_section_data (exidx_sec)->this_hdr;
10991 if (hdr->sh_type != SHT_ARM_EXIDX)
10994 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10995 if (exidx_arm_data == NULL)
10998 ibfd = exidx_sec->owner;
11000 if (hdr->contents != NULL)
11001 contents = hdr->contents;
11002 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
11006 for (j = 0; j < hdr->sh_size; j += 8)
11008 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
11012 /* An EXIDX_CANTUNWIND entry. */
11013 if (second_word == 1)
11015 if (last_unwind_type == 0)
11019 /* Inlined unwinding data. Merge if equal to previous. */
11020 else if ((second_word & 0x80000000) != 0)
11022 if (merge_exidx_entries
11023 && last_second_word == second_word && last_unwind_type == 1)
11026 last_second_word = second_word;
11028 /* Normal table entry. In theory we could merge these too,
11029 but duplicate entries are likely to be much less common. */
11035 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
11036 DELETE_EXIDX_ENTRY, NULL, j / 8);
11038 deleted_exidx_bytes += 8;
11041 last_unwind_type = unwind_type;
11044 /* Free contents if we allocated it ourselves. */
11045 if (contents != hdr->contents)
11048 /* Record edits to be applied later (in elf32_arm_write_section). */
11049 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
11050 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
11052 if (deleted_exidx_bytes > 0)
11053 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
11055 last_exidx_sec = exidx_sec;
11056 last_text_sec = sec;
11059 /* Add terminating CANTUNWIND entry. */
11060 if (last_exidx_sec && last_unwind_type != 0)
11061 insert_cantunwind_after (last_text_sec, last_exidx_sec);
11067 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
11068 bfd *ibfd, const char *name)
11070 asection *sec, *osec;
11072 sec = bfd_get_linker_section (ibfd, name);
11073 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
11076 osec = sec->output_section;
11077 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
11080 if (! bfd_set_section_contents (obfd, osec, sec->contents,
11081 sec->output_offset, sec->size))
11088 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
11090 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
11091 asection *sec, *osec;
11093 if (globals == NULL)
11096 /* Invoke the regular ELF backend linker to do all the work. */
11097 if (!bfd_elf_final_link (abfd, info))
11100 /* Process stub sections (e.g. BE8 encoding, ...). */
11101 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
11103 for (i = 0; i < htab->top_id; i++)
11105 sec = htab->stub_group[i].stub_sec;
11106 /* Only process it once, in its link_sec slot. */
11107 if (sec && i == htab->stub_group[i].link_sec->id)
11109 osec = sec->output_section;
11110 elf32_arm_write_section (abfd, info, sec, sec->contents);
11111 if (! bfd_set_section_contents (abfd, osec, sec->contents,
11112 sec->output_offset, sec->size))
11117 /* Write out any glue sections now that we have created all the stubs. */
11119 if (globals->bfd_of_glue_owner != NULL)
11121 if (! elf32_arm_output_glue_section (info, abfd,
11122 globals->bfd_of_glue_owner,
11123 ARM2THUMB_GLUE_SECTION_NAME))
11126 if (! elf32_arm_output_glue_section (info, abfd,
11127 globals->bfd_of_glue_owner,
11128 THUMB2ARM_GLUE_SECTION_NAME))
11131 if (! elf32_arm_output_glue_section (info, abfd,
11132 globals->bfd_of_glue_owner,
11133 VFP11_ERRATUM_VENEER_SECTION_NAME))
11136 if (! elf32_arm_output_glue_section (info, abfd,
11137 globals->bfd_of_glue_owner,
11138 ARM_BX_GLUE_SECTION_NAME))
11145 /* Return a best guess for the machine number based on the attributes. */
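/* For example, a v5TE object whose Tag_CPU_name is "XSCALE" is reported as
   bfd_mach_arm_XScale (or as an iWMMXt variant when Tag_WMMX_arch says so),
   while other v5TE objects simply map to bfd_mach_arm_5TE.  */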
11147 static unsigned int
11148 bfd_arm_get_mach_from_attributes (bfd * abfd)
11150 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
11154 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
11155 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
11156 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
11158 case TAG_CPU_ARCH_V5TE:
11162 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
11163 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
11167 if (strcmp (name, "IWMMXT2") == 0)
11168 return bfd_mach_arm_iWMMXt2;
11170 if (strcmp (name, "IWMMXT") == 0)
11171 return bfd_mach_arm_iWMMXt;
11173 if (strcmp (name, "XSCALE") == 0)
11177 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
11178 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
11181 case 1: return bfd_mach_arm_iWMMXt;
11182 case 2: return bfd_mach_arm_iWMMXt2;
11183 default: return bfd_mach_arm_XScale;
11188 return bfd_mach_arm_5TE;
11192 return bfd_mach_arm_unknown;
11196 /* Set the right machine number. */
11199 elf32_arm_object_p (bfd *abfd)
11203 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
11205 if (mach == bfd_mach_arm_unknown)
11207 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
11208 mach = bfd_mach_arm_ep9312;
11210 mach = bfd_arm_get_mach_from_attributes (abfd);
11213 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
11217 /* Function to keep ARM specific flags in the ELF header. */
11220 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
11222 if (elf_flags_init (abfd)
11223 && elf_elfheader (abfd)->e_flags != flags)
11225 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
11227 if (flags & EF_ARM_INTERWORK)
11228 (*_bfd_error_handler)
11229 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
11233 (_("Warning: Clearing the interworking flag of %B due to outside request"),
11239 elf_elfheader (abfd)->e_flags = flags;
11240 elf_flags_init (abfd) = TRUE;
11246 /* Copy backend specific data from one object module to another. */
11249 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
11252 flagword out_flags;
11254 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
11257 in_flags = elf_elfheader (ibfd)->e_flags;
11258 out_flags = elf_elfheader (obfd)->e_flags;
11260 if (elf_flags_init (obfd)
11261 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
11262 && in_flags != out_flags)
11264 /* Cannot mix APCS26 and APCS32 code. */
11265 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
11268 /* Cannot mix float APCS and non-float APCS code. */
11269 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
11272 /* If the src and dest have different interworking flags
11273 then turn off the interworking bit. */
11274 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
11276 if (out_flags & EF_ARM_INTERWORK)
11278 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
11281 in_flags &= ~EF_ARM_INTERWORK;
11284 /* Likewise for PIC, though don't warn for this case. */
11285 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
11286 in_flags &= ~EF_ARM_PIC;
11289 elf_elfheader (obfd)->e_flags = in_flags;
11290 elf_flags_init (obfd) = TRUE;
11292 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
11295 /* Values for Tag_ABI_PCS_R9_use. */
11304 /* Values for Tag_ABI_PCS_RW_data. */
11307 AEABI_PCS_RW_data_absolute,
11308 AEABI_PCS_RW_data_PCrel,
11309 AEABI_PCS_RW_data_SBrel,
11310 AEABI_PCS_RW_data_unused
11313 /* Values for Tag_ABI_enum_size. */
11319 AEABI_enum_forced_wide
11322 /* Determine whether an object attribute tag takes an integer, a string, or both. */
11326 elf32_arm_obj_attrs_arg_type (int tag)
11328 if (tag == Tag_compatibility)
11329 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
11330 else if (tag == Tag_nodefaults)
11331 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
11332 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
11333 return ATTR_TYPE_FLAG_STR_VAL;
11335 return ATTR_TYPE_FLAG_INT_VAL;
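/* For other tags, the AEABI convention for unlisted attributes applies:
   odd-numbered tags take a string, even-numbered tags take an integer
   (hence the parity test below).  */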
11337 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
11340 /* The ABI defines that Tag_conformance should be emitted first, and that
11341 Tag_nodefaults should be second (if either is defined). This sets those
11342 two positions, and bumps up the position of all the remaining tags to accommodate them. */
11345 elf32_arm_obj_attrs_order (int num)
11347 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
11348 return Tag_conformance;
11349 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
11350 return Tag_nodefaults;
11351 if ((num - 2) < Tag_nodefaults)
11353 if ((num - 1) < Tag_conformance)
11358 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
11360 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
11362 if ((tag & 127) < 64)
11365 (_("%B: Unknown mandatory EABI object attribute %d"),
11367 bfd_set_error (bfd_error_bad_value);
11373 (_("Warning: %B: Unknown EABI object attribute %d"),
11379 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
11380 Returns -1 if no architecture could be read. */
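/* The attribute value is a small ULEB128-encoded (tag, value) pair; for
   example, an object also compatible with v6-M would typically carry the
   bytes { Tag_CPU_arch, TAG_CPU_ARCH_V6_M, 0 }, i.e. { 6, 11, 0 }.  */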
11383 get_secondary_compatible_arch (bfd *abfd)
11385 obj_attribute *attr =
11386 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11388 /* Note: the tag and its argument below are uleb128 values, though
11389 currently-defined values fit in one byte for each. */
11391 && attr->s[0] == Tag_CPU_arch
11392 && (attr->s[1] & 128) != 128
11393 && attr->s[2] == 0)
11396 /* This tag is "safely ignorable", so don't complain if it looks funny. */
11400 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
11401 The tag is removed if ARCH is -1. */
11404 set_secondary_compatible_arch (bfd *abfd, int arch)
11406 obj_attribute *attr =
11407 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11415 /* Note: the tag and its argument below are uleb128 values, though
11416 currently-defined values fit in one byte for each. */
11418 attr->s = (char *) bfd_alloc (abfd, 3);
11419 attr->s[0] = Tag_CPU_arch;
11424 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags into account. */
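/* For example, per the v6_m[] row below, merging a v5TE object with a v6-M
   object resolves Tag_CPU_arch to v6K, since neither of the two input
   architectures is a subset of the other.  */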
11428 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
11429 int newtag, int secondary_compat)
11431 #define T(X) TAG_CPU_ARCH_##X
11432 int tagl, tagh, result;
11435 T(V6T2), /* PRE_V4. */
11437 T(V6T2), /* V4T. */
11438 T(V6T2), /* V5T. */
11439 T(V6T2), /* V5TE. */
11440 T(V6T2), /* V5TEJ. */
11443 T(V6T2) /* V6T2. */
11447 T(V6K), /* PRE_V4. */
11451 T(V6K), /* V5TE. */
11452 T(V6K), /* V5TEJ. */
11454 T(V6KZ), /* V6KZ. */
11460 T(V7), /* PRE_V4. */
11465 T(V7), /* V5TEJ. */
11478 T(V6K), /* V5TE. */
11479 T(V6K), /* V5TEJ. */
11481 T(V6KZ), /* V6KZ. */
11485 T(V6_M) /* V6_M. */
11487 const int v6s_m[] =
11493 T(V6K), /* V5TE. */
11494 T(V6K), /* V5TEJ. */
11496 T(V6KZ), /* V6KZ. */
11500 T(V6S_M), /* V6_M. */
11501 T(V6S_M) /* V6S_M. */
11503 const int v7e_m[] =
11507 T(V7E_M), /* V4T. */
11508 T(V7E_M), /* V5T. */
11509 T(V7E_M), /* V5TE. */
11510 T(V7E_M), /* V5TEJ. */
11511 T(V7E_M), /* V6. */
11512 T(V7E_M), /* V6KZ. */
11513 T(V7E_M), /* V6T2. */
11514 T(V7E_M), /* V6K. */
11515 T(V7E_M), /* V7. */
11516 T(V7E_M), /* V6_M. */
11517 T(V7E_M), /* V6S_M. */
11518 T(V7E_M) /* V7E_M. */
11522 T(V8), /* PRE_V4. */
11527 T(V8), /* V5TEJ. */
11534 T(V8), /* V6S_M. */
11535 T(V8), /* V7E_M. */
11538 const int v4t_plus_v6_m[] =
11544 T(V5TE), /* V5TE. */
11545 T(V5TEJ), /* V5TEJ. */
11547 T(V6KZ), /* V6KZ. */
11548 T(V6T2), /* V6T2. */
11551 T(V6_M), /* V6_M. */
11552 T(V6S_M), /* V6S_M. */
11553 T(V7E_M), /* V7E_M. */
11555 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11557 const int *comb[] =
11566 /* Pseudo-architecture. */
11570 /* Check we've not got a higher architecture than we know about. */
11572 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11574 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11578 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11580 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11581 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11582 oldtag = T(V4T_PLUS_V6_M);
11584 /* And override the new tag if we have a Tag_also_compatible_with on the input. */
11587 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11588 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11589 newtag = T(V4T_PLUS_V6_M);
11591 tagl = (oldtag < newtag) ? oldtag : newtag;
11592 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11594 /* Architectures before V6KZ add features monotonically. */
11595 if (tagh <= TAG_CPU_ARCH_V6KZ)
11598 result = comb[tagh - T(V6T2)][tagl];
11600 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11601 as the canonical version. */
11602 if (result == T(V4T_PLUS_V6_M))
11605 *secondary_compat_out = T(V6_M);
11608 *secondary_compat_out = -1;
11612 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11613 ibfd, oldtag, newtag);
11621 /* Query attributes object to see if integer divide instructions may be
11622 present in an object. */
11624 elf32_arm_attributes_accept_div (const obj_attribute *attr)
11626 int arch = attr[Tag_CPU_arch].i;
11627 int profile = attr[Tag_CPU_arch_profile].i;
11629 switch (attr[Tag_DIV_use].i)
11632 /* Integer divide allowed if instruction contained in architecture. */
11633 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
11635 else if (arch >= TAG_CPU_ARCH_V7E_M)
11641 /* Integer divide explicitly prohibited. */
11645 /* Unrecognised case - treat as allowing divide everywhere. */
11647 /* Integer divide allowed in ARM state. */
11652 /* Query attributes object to see if integer divide instructions are
11653 forbidden to be in the object. This is not the inverse of
11654 elf32_arm_attributes_accept_div. */
11656 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
11658 return attr[Tag_DIV_use].i == 1;
11661 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
11662 are conflicting attributes. */
11665 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
11667 obj_attribute *in_attr;
11668 obj_attribute *out_attr;
11669 /* Some tags have 0 = don't care, 1 = strong requirement,
11670 2 = weak requirement. */
11671 static const int order_021[3] = {0, 2, 1};
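/* order_021 ranks those values so that 1 (strong) outranks 2 (weak), which
   outranks 0 (don't care): e.g. merging an input of 1 into an output of 2
   leaves 1 in the output (see the Tag_ABI_FP_denormal handling below).  */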
11673 bfd_boolean result = TRUE;
11675 /* Skip the linker stubs file. This preserves previous behavior
11676 of accepting unknown attributes in the first input file - but
11678 if (ibfd->flags & BFD_LINKER_CREATED)
11681 if (!elf_known_obj_attributes_proc (obfd)[0].i)
11683 /* This is the first object. Copy the attributes. */
11684 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11686 out_attr = elf_known_obj_attributes_proc (obfd);
11688 /* Use the Tag_null value to indicate the attributes have been initialized. */
11692 /* We do not output objects with Tag_MPextension_use_legacy - we move
11693 the attribute's value to Tag_MPextension_use. */
11694 if (out_attr[Tag_MPextension_use_legacy].i != 0)
11696 if (out_attr[Tag_MPextension_use].i != 0
11697 && out_attr[Tag_MPextension_use_legacy].i
11698 != out_attr[Tag_MPextension_use].i)
11701 (_("Error: %B has both the current and legacy "
11702 "Tag_MPextension_use attributes"), ibfd);
11706 out_attr[Tag_MPextension_use] =
11707 out_attr[Tag_MPextension_use_legacy];
11708 out_attr[Tag_MPextension_use_legacy].type = 0;
11709 out_attr[Tag_MPextension_use_legacy].i = 0;
11715 in_attr = elf_known_obj_attributes_proc (ibfd);
11716 out_attr = elf_known_obj_attributes_proc (obfd);
11717 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
11718 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
11720 /* Ignore mismatches if the object doesn't use floating point. */
11721 if (out_attr[Tag_ABI_FP_number_model].i == 0)
11722 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
11723 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
11726 (_("error: %B uses VFP register arguments, %B does not"),
11727 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
11728 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
11733 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
11735 /* Merge this attribute with existing attributes. */
11738 case Tag_CPU_raw_name:
11740 /* These are merged after Tag_CPU_arch. */
11743 case Tag_ABI_optimization_goals:
11744 case Tag_ABI_FP_optimization_goals:
11745 /* Use the first value seen. */
11750 int secondary_compat = -1, secondary_compat_out = -1;
11751 unsigned int saved_out_attr = out_attr[i].i;
11752 static const char *name_table[] = {
11753 /* These aren't real CPU names, but we can't guess
11754 that from the architecture version alone. */
11771 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
11772 secondary_compat = get_secondary_compatible_arch (ibfd);
11773 secondary_compat_out = get_secondary_compatible_arch (obfd);
11774 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
11775 &secondary_compat_out,
11778 set_secondary_compatible_arch (obfd, secondary_compat_out);
11780 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
11781 if (out_attr[i].i == saved_out_attr)
11782 ; /* Leave the names alone. */
11783 else if (out_attr[i].i == in_attr[i].i)
11785 /* The output architecture has been changed to match the
11786 input architecture. Use the input names. */
11787 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
11788 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
11790 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
11791 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
11796 out_attr[Tag_CPU_name].s = NULL;
11797 out_attr[Tag_CPU_raw_name].s = NULL;
11800 /* If we still don't have a value for Tag_CPU_name,
11801 make one up now. Tag_CPU_raw_name remains blank. */
11802 if (out_attr[Tag_CPU_name].s == NULL
11803 && out_attr[i].i < ARRAY_SIZE (name_table))
11804 out_attr[Tag_CPU_name].s =
11805 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
11809 case Tag_ARM_ISA_use:
11810 case Tag_THUMB_ISA_use:
11811 case Tag_WMMX_arch:
11812 case Tag_Advanced_SIMD_arch:
11813 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
11814 case Tag_ABI_FP_rounding:
11815 case Tag_ABI_FP_exceptions:
11816 case Tag_ABI_FP_user_exceptions:
11817 case Tag_ABI_FP_number_model:
11818 case Tag_FP_HP_extension:
11819 case Tag_CPU_unaligned_access:
11821 case Tag_MPextension_use:
11822 /* Use the largest value specified. */
11823 if (in_attr[i].i > out_attr[i].i)
11824 out_attr[i].i = in_attr[i].i;
11827 case Tag_ABI_align_preserved:
11828 case Tag_ABI_PCS_RO_data:
11829 /* Use the smallest value specified. */
11830 if (in_attr[i].i < out_attr[i].i)
11831 out_attr[i].i = in_attr[i].i;
11834 case Tag_ABI_align_needed:
11835 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
11836 && (in_attr[Tag_ABI_align_preserved].i == 0
11837 || out_attr[Tag_ABI_align_preserved].i == 0))
11839 /* This error message should be enabled once all non-conformant
11840 binaries in the toolchain have had the attributes set
11843 (_("error: %B: 8-byte data alignment conflicts with %B"),
11847 /* Fall through. */
11848 case Tag_ABI_FP_denormal:
11849 case Tag_ABI_PCS_GOT_use:
11850 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
11851 value if greater than 2 (for future-proofing). */
11852 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
11853 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
11854 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
11855 out_attr[i].i = in_attr[i].i;
11858 case Tag_Virtualization_use:
11859 /* The virtualization tag effectively stores two bits of
11860 information: the intended use of TrustZone (in bit 0), and the
11861 intended use of Virtualization (in bit 1). */
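/* For example, merging an input of 1 (TrustZone only) with an output of 2
   (virtualization extensions only) is expected to yield 3, i.e. both bits
   set.  */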
11862 if (out_attr[i].i == 0)
11863 out_attr[i].i = in_attr[i].i;
11864 else if (in_attr[i].i != 0
11865 && in_attr[i].i != out_attr[i].i)
11867 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
11872 (_("error: %B: unable to merge virtualization attributes "
11880 case Tag_CPU_arch_profile:
11881 if (out_attr[i].i != in_attr[i].i)
11883 /* 0 will merge with anything.
11884 'A' and 'S' merge to 'A'.
11885 'R' and 'S' merge to 'R'.
11886 'M' and 'A|R|S' is an error. */
11887 if (out_attr[i].i == 0
11888 || (out_attr[i].i == 'S'
11889 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
11890 out_attr[i].i = in_attr[i].i;
11891 else if (in_attr[i].i == 0
11892 || (in_attr[i].i == 'S'
11893 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
11894 ; /* Do nothing. */
11898 (_("error: %B: Conflicting architecture profiles %c/%c"),
11900 in_attr[i].i ? in_attr[i].i : '0',
11901 out_attr[i].i ? out_attr[i].i : '0');
11908 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
11909 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
11910 when it's 0. It might mean absence of FP hardware if
11911 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
11913 #define VFP_VERSION_COUNT 8
11914 static const struct
11918 } vfp_versions[VFP_VERSION_COUNT] =
11933 /* If the output has no requirement about FP hardware,
11934 follow the requirement of the input. */
11935 if (out_attr[i].i == 0)
11937 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
11938 out_attr[i].i = in_attr[i].i;
11939 out_attr[Tag_ABI_HardFP_use].i
11940 = in_attr[Tag_ABI_HardFP_use].i;
11943 /* If the input has no requirement about FP hardware, do nothing. */
11945 else if (in_attr[i].i == 0)
11947 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
11951 /* Both the input and the output have nonzero Tag_FP_arch.
11952 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
11954 /* If both the input and the output have zero Tag_ABI_HardFP_use, do nothing. */
11956 if (in_attr[Tag_ABI_HardFP_use].i == 0
11957 && out_attr[Tag_ABI_HardFP_use].i == 0)
11959 /* If the input and the output have different Tag_ABI_HardFP_use,
11960 the combination of them is 3 (SP & DP). */
11961 else if (in_attr[Tag_ABI_HardFP_use].i
11962 != out_attr[Tag_ABI_HardFP_use].i)
11963 out_attr[Tag_ABI_HardFP_use].i = 3;
11965 /* Now we can handle Tag_FP_arch. */
11967 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
11968 pick the biggest. */
11969 if (in_attr[i].i >= VFP_VERSION_COUNT
11970 && in_attr[i].i > out_attr[i].i)
11972 out_attr[i] = in_attr[i];
11975 /* The output uses the superset of input features
11976 (ISA version) and registers. */
11977 ver = vfp_versions[in_attr[i].i].ver;
11978 if (ver < vfp_versions[out_attr[i].i].ver)
11979 ver = vfp_versions[out_attr[i].i].ver;
11980 regs = vfp_versions[in_attr[i].i].regs;
11981 if (regs < vfp_versions[out_attr[i].i].regs)
11982 regs = vfp_versions[out_attr[i].i].regs;
11983 /* This assumes all possible supersets are also a valid VFP version. */
11985 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
11987 if (regs == vfp_versions[newval].regs
11988 && ver == vfp_versions[newval].ver)
11991 out_attr[i].i = newval;
11994 case Tag_PCS_config:
11995 if (out_attr[i].i == 0)
11996 out_attr[i].i = in_attr[i].i;
11997 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
11999 /* It's sometimes ok to mix different configs, so this is only
12002 (_("Warning: %B: Conflicting platform configuration"), ibfd);
12005 case Tag_ABI_PCS_R9_use:
12006 if (in_attr[i].i != out_attr[i].i
12007 && out_attr[i].i != AEABI_R9_unused
12008 && in_attr[i].i != AEABI_R9_unused)
12011 (_("error: %B: Conflicting use of R9"), ibfd);
12014 if (out_attr[i].i == AEABI_R9_unused)
12015 out_attr[i].i = in_attr[i].i;
12017 case Tag_ABI_PCS_RW_data:
12018 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
12019 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
12020 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
12023 (_("error: %B: SB relative addressing conflicts with use of R9"),
12027 /* Use the smallest value specified. */
12028 if (in_attr[i].i < out_attr[i].i)
12029 out_attr[i].i = in_attr[i].i;
12031 case Tag_ABI_PCS_wchar_t:
12032 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
12033 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
12036 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
12037 ibfd, in_attr[i].i, out_attr[i].i);
12039 else if (in_attr[i].i && !out_attr[i].i)
12040 out_attr[i].i = in_attr[i].i;
12042 case Tag_ABI_enum_size:
12043 if (in_attr[i].i != AEABI_enum_unused)
12045 if (out_attr[i].i == AEABI_enum_unused
12046 || out_attr[i].i == AEABI_enum_forced_wide)
12048 /* The existing object is compatible with anything.
12049 Use whatever requirements the new object has. */
12050 out_attr[i].i = in_attr[i].i;
12052 else if (in_attr[i].i != AEABI_enum_forced_wide
12053 && out_attr[i].i != in_attr[i].i
12054 && !elf_arm_tdata (obfd)->no_enum_size_warning)
12056 static const char *aeabi_enum_names[] =
12057 { "", "variable-size", "32-bit", "" };
12058 const char *in_name =
12059 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12060 ? aeabi_enum_names[in_attr[i].i]
12062 const char *out_name =
12063 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12064 ? aeabi_enum_names[out_attr[i].i]
12067 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
12068 ibfd, in_name, out_name);
12072 case Tag_ABI_VFP_args:
12075 case Tag_ABI_WMMX_args:
12076 if (in_attr[i].i != out_attr[i].i)
12079 (_("error: %B uses iWMMXt register arguments, %B does not"),
12084 case Tag_compatibility:
12085 /* Merged in target-independent code. */
12087 case Tag_ABI_HardFP_use:
12088 /* This is handled along with Tag_FP_arch. */
12090 case Tag_ABI_FP_16bit_format:
12091 if (in_attr[i].i != 0 && out_attr[i].i != 0)
12093 if (in_attr[i].i != out_attr[i].i)
12096 (_("error: fp16 format mismatch between %B and %B"),
12101 if (in_attr[i].i != 0)
12102 out_attr[i].i = in_attr[i].i;
12106 /* A value of zero on input means that the divide instruction may
12107 be used if available in the base architecture as specified via
12108 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
12109 the user did not want divide instructions. A value of 2
12110 explicitly means that divide instructions were allowed in ARM
12111 and Thumb state. */
12112 if (in_attr[i].i == out_attr[i].i)
12113 /* Do nothing. */ ;
12114 else if (elf32_arm_attributes_forbid_div (in_attr)
12115 && !elf32_arm_attributes_accept_div (out_attr))
12117 else if (elf32_arm_attributes_forbid_div (out_attr)
12118 && elf32_arm_attributes_accept_div (in_attr))
12119 out_attr[i].i = in_attr[i].i;
12120 else if (in_attr[i].i == 2)
12121 out_attr[i].i = in_attr[i].i;
12124 case Tag_MPextension_use_legacy:
12125 /* We don't output objects with Tag_MPextension_use_legacy - we
12126 move the value to Tag_MPextension_use. */
12127 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
12129 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
12132 (_("%B has has both the current and legacy "
12133 "Tag_MPextension_use attributes"),
12139 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
12140 out_attr[Tag_MPextension_use] = in_attr[i];
12144 case Tag_nodefaults:
12145 /* This tag is set if it exists, but the value is unused (and is
12146 typically zero). We don't actually need to do anything here -
12147 the merge happens automatically when the type flags are merged
12150 case Tag_also_compatible_with:
12151 /* Already done in Tag_CPU_arch. */
12153 case Tag_conformance:
12154 /* Keep the attribute if it matches. Throw it away otherwise.
12155 No attribute means no claim to conform. */
12156 if (!in_attr[i].s || !out_attr[i].s
12157 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
12158 out_attr[i].s = NULL;
12163 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
12166 /* If out_attr was copied from in_attr then it won't have a type yet. */
12167 if (in_attr[i].type && !out_attr[i].type)
12168 out_attr[i].type = in_attr[i].type;
12171 /* Merge Tag_compatibility attributes and any common GNU ones. */
12172 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
12175 /* Check for any attributes not known on ARM. */
12176 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
12182 /* Return TRUE if the two EABI versions are compatible. */
12185 elf32_arm_versions_compatible (unsigned iver, unsigned over)
12187 /* v4 and v5 are the same spec before and after it was released,
12188 so allow mixing them. */
12189 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
12190 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
12193 return (iver == over);
12196 /* Merge backend specific data from an object file to the output
12197 object file when linking. */
12200 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
12202 /* Display the flags field. */
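/* For example, a typical EABI v5 soft-float object prints roughly
     private flags = 5000200: [Version5 EABI] [soft-float ABI]
   assuming an e_flags value of 0x05000200, i.e.
   EF_ARM_EABI_VER5 | EF_ARM_ABI_FLOAT_SOFT.  */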
12205 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
12207 FILE * file = (FILE *) ptr;
12208 unsigned long flags;
12210 BFD_ASSERT (abfd != NULL && ptr != NULL);
12212 /* Print normal ELF private data. */
12213 _bfd_elf_print_private_bfd_data (abfd, ptr);
12215 flags = elf_elfheader (abfd)->e_flags;
12216 /* Ignore init flag - it may not be set, despite the flags field
12217 containing valid data. */
12219 /* xgettext:c-format */
12220 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
12222 switch (EF_ARM_EABI_VERSION (flags))
12224 case EF_ARM_EABI_UNKNOWN:
12225 /* The following flag bits are GNU extensions and not part of the
12226 official ARM ELF extended ABI. Hence they are only decoded if
12227 the EABI version is not set. */
12228 if (flags & EF_ARM_INTERWORK)
12229 fprintf (file, _(" [interworking enabled]"));
12231 if (flags & EF_ARM_APCS_26)
12232 fprintf (file, " [APCS-26]");
12234 fprintf (file, " [APCS-32]");
12236 if (flags & EF_ARM_VFP_FLOAT)
12237 fprintf (file, _(" [VFP float format]"));
12238 else if (flags & EF_ARM_MAVERICK_FLOAT)
12239 fprintf (file, _(" [Maverick float format]"));
12241 fprintf (file, _(" [FPA float format]"));
12243 if (flags & EF_ARM_APCS_FLOAT)
12244 fprintf (file, _(" [floats passed in float registers]"));
12246 if (flags & EF_ARM_PIC)
12247 fprintf (file, _(" [position independent]"));
12249 if (flags & EF_ARM_NEW_ABI)
12250 fprintf (file, _(" [new ABI]"));
12252 if (flags & EF_ARM_OLD_ABI)
12253 fprintf (file, _(" [old ABI]"));
12255 if (flags & EF_ARM_SOFT_FLOAT)
12256 fprintf (file, _(" [software FP]"));
12258 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
12259 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
12260 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
12261 | EF_ARM_MAVERICK_FLOAT);
12264 case EF_ARM_EABI_VER1:
12265 fprintf (file, _(" [Version1 EABI]"));
12267 if (flags & EF_ARM_SYMSARESORTED)
12268 fprintf (file, _(" [sorted symbol table]"));
12270 fprintf (file, _(" [unsorted symbol table]"));
12272 flags &= ~ EF_ARM_SYMSARESORTED;
12275 case EF_ARM_EABI_VER2:
12276 fprintf (file, _(" [Version2 EABI]"));
12278 if (flags & EF_ARM_SYMSARESORTED)
12279 fprintf (file, _(" [sorted symbol table]"));
12281 fprintf (file, _(" [unsorted symbol table]"));
12283 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
12284 fprintf (file, _(" [dynamic symbols use segment index]"));
12286 if (flags & EF_ARM_MAPSYMSFIRST)
12287 fprintf (file, _(" [mapping symbols precede others]"));
12289 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
12290 | EF_ARM_MAPSYMSFIRST);
12293 case EF_ARM_EABI_VER3:
12294 fprintf (file, _(" [Version3 EABI]"));
12297 case EF_ARM_EABI_VER4:
12298 fprintf (file, _(" [Version4 EABI]"));
12301 case EF_ARM_EABI_VER5:
12302 fprintf (file, _(" [Version5 EABI]"));
12304 if (flags & EF_ARM_ABI_FLOAT_SOFT)
12305 fprintf (file, _(" [soft-float ABI]"));
12307 if (flags & EF_ARM_ABI_FLOAT_HARD)
12308 fprintf (file, _(" [hard-float ABI]"));
12310 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
12313 if (flags & EF_ARM_BE8)
12314 fprintf (file, _(" [BE8]"));
12316 if (flags & EF_ARM_LE8)
12317 fprintf (file, _(" [LE8]"));
12319 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
12323 fprintf (file, _(" <EABI version unrecognised>"));
12327 flags &= ~ EF_ARM_EABIMASK;
12329 if (flags & EF_ARM_RELEXEC)
12330 fprintf (file, _(" [relocatable executable]"));
12332 if (flags & EF_ARM_HASENTRY)
12333 fprintf (file, _(" [has entry point]"));
12335 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
12338 fprintf (file, _("<Unrecognised flag bits set>"));
12340 fputc ('\n', file);
12346 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
12348 switch (ELF_ST_TYPE (elf_sym->st_info))
12350 case STT_ARM_TFUNC:
12351 return ELF_ST_TYPE (elf_sym->st_info);
12353 case STT_ARM_16BIT:
12354 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
12355 This allows us to distinguish between data used by Thumb instructions
12356 and non-data (which is probably code) inside Thumb regions of an image. */
12358 if (type != STT_OBJECT && type != STT_TLS)
12359 return ELF_ST_TYPE (elf_sym->st_info);
12370 elf32_arm_gc_mark_hook (asection *sec,
12371 struct bfd_link_info *info,
12372 Elf_Internal_Rela *rel,
12373 struct elf_link_hash_entry *h,
12374 Elf_Internal_Sym *sym)
12377 switch (ELF32_R_TYPE (rel->r_info))
12379 case R_ARM_GNU_VTINHERIT:
12380 case R_ARM_GNU_VTENTRY:
12384 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
12387 /* Update the got entry reference counts for the section being removed. */
12390 elf32_arm_gc_sweep_hook (bfd * abfd,
12391 struct bfd_link_info * info,
12393 const Elf_Internal_Rela * relocs)
12395 Elf_Internal_Shdr *symtab_hdr;
12396 struct elf_link_hash_entry **sym_hashes;
12397 bfd_signed_vma *local_got_refcounts;
12398 const Elf_Internal_Rela *rel, *relend;
12399 struct elf32_arm_link_hash_table * globals;
12401 if (info->relocatable)
12404 globals = elf32_arm_hash_table (info);
12405 if (globals == NULL)
12408 elf_section_data (sec)->local_dynrel = NULL;
12410 symtab_hdr = & elf_symtab_hdr (abfd);
12411 sym_hashes = elf_sym_hashes (abfd);
12412 local_got_refcounts = elf_local_got_refcounts (abfd);
12414 check_use_blx (globals);
12416 relend = relocs + sec->reloc_count;
12417 for (rel = relocs; rel < relend; rel++)
12419 unsigned long r_symndx;
12420 struct elf_link_hash_entry *h = NULL;
12421 struct elf32_arm_link_hash_entry *eh;
12423 bfd_boolean call_reloc_p;
12424 bfd_boolean may_become_dynamic_p;
12425 bfd_boolean may_need_local_target_p;
12426 union gotplt_union *root_plt;
12427 struct arm_plt_info *arm_plt;
12429 r_symndx = ELF32_R_SYM (rel->r_info);
12430 if (r_symndx >= symtab_hdr->sh_info)
12432 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12433 while (h->root.type == bfd_link_hash_indirect
12434 || h->root.type == bfd_link_hash_warning)
12435 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12437 eh = (struct elf32_arm_link_hash_entry *) h;
12439 call_reloc_p = FALSE;
12440 may_become_dynamic_p = FALSE;
12441 may_need_local_target_p = FALSE;
12443 r_type = ELF32_R_TYPE (rel->r_info);
12444 r_type = arm_real_reloc_type (globals, r_type);
12448 case R_ARM_GOT_PREL:
12449 case R_ARM_TLS_GD32:
12450 case R_ARM_TLS_IE32:
12453 if (h->got.refcount > 0)
12454 h->got.refcount -= 1;
12456 else if (local_got_refcounts != NULL)
12458 if (local_got_refcounts[r_symndx] > 0)
12459 local_got_refcounts[r_symndx] -= 1;
12463 case R_ARM_TLS_LDM32:
12464 globals->tls_ldm_got.refcount -= 1;
12472 case R_ARM_THM_CALL:
12473 case R_ARM_THM_JUMP24:
12474 case R_ARM_THM_JUMP19:
12475 call_reloc_p = TRUE;
12476 may_need_local_target_p = TRUE;
12480 if (!globals->vxworks_p)
12482 may_need_local_target_p = TRUE;
12485 /* Fall through. */
12487 case R_ARM_ABS32_NOI:
12489 case R_ARM_REL32_NOI:
12490 case R_ARM_MOVW_ABS_NC:
12491 case R_ARM_MOVT_ABS:
12492 case R_ARM_MOVW_PREL_NC:
12493 case R_ARM_MOVT_PREL:
12494 case R_ARM_THM_MOVW_ABS_NC:
12495 case R_ARM_THM_MOVT_ABS:
12496 case R_ARM_THM_MOVW_PREL_NC:
12497 case R_ARM_THM_MOVT_PREL:
12498 /* Should the interworking branches be here also? */
12499 if ((info->shared || globals->root.is_relocatable_executable)
12500 && (sec->flags & SEC_ALLOC) != 0)
12503 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12505 call_reloc_p = TRUE;
12506 may_need_local_target_p = TRUE;
12509 may_become_dynamic_p = TRUE;
12512 may_need_local_target_p = TRUE;
12519 if (may_need_local_target_p
12520 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
12522 /* If PLT refcount book-keeping is wrong and too low, we'll
12523 see a zero value (going to -1) for the root PLT reference
12525 if (root_plt->refcount >= 0)
12527 BFD_ASSERT (root_plt->refcount != 0);
12528 root_plt->refcount -= 1;
12531 /* A value of -1 means the symbol has become local, forced
12532 or seeing a hidden definition. Any other negative value is a bug. */
12534 BFD_ASSERT (root_plt->refcount == -1);
12537 arm_plt->noncall_refcount--;
12539 if (r_type == R_ARM_THM_CALL)
12540 arm_plt->maybe_thumb_refcount--;
12542 if (r_type == R_ARM_THM_JUMP24
12543 || r_type == R_ARM_THM_JUMP19)
12544 arm_plt->thumb_refcount--;
12547 if (may_become_dynamic_p)
12549 struct elf_dyn_relocs **pp;
12550 struct elf_dyn_relocs *p;
12553 pp = &(eh->dyn_relocs);
12556 Elf_Internal_Sym *isym;
12558 isym = bfd_sym_from_r_symndx (&globals->sym_cache,
12562 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12566 for (; (p = *pp) != NULL; pp = &p->next)
12569 /* Everything must go for SEC. */
12579 /* Look through the relocs for a section during the first phase. */
12582 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
12583 asection *sec, const Elf_Internal_Rela *relocs)
12585 Elf_Internal_Shdr *symtab_hdr;
12586 struct elf_link_hash_entry **sym_hashes;
12587 const Elf_Internal_Rela *rel;
12588 const Elf_Internal_Rela *rel_end;
12591 struct elf32_arm_link_hash_table *htab;
12592 bfd_boolean call_reloc_p;
12593 bfd_boolean may_become_dynamic_p;
12594 bfd_boolean may_need_local_target_p;
12595 unsigned long nsyms;
12597 if (info->relocatable)
12600 BFD_ASSERT (is_arm_elf (abfd));
12602 htab = elf32_arm_hash_table (info);
12608 /* Create dynamic sections for relocatable executables so that we can
12609 copy relocations. */
12610 if (htab->root.is_relocatable_executable
12611 && ! htab->root.dynamic_sections_created)
12613 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
12617 if (htab->root.dynobj == NULL)
12618 htab->root.dynobj = abfd;
12619 if (!create_ifunc_sections (info))
12622 dynobj = htab->root.dynobj;
12624 symtab_hdr = & elf_symtab_hdr (abfd);
12625 sym_hashes = elf_sym_hashes (abfd);
12626 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
12628 rel_end = relocs + sec->reloc_count;
12629 for (rel = relocs; rel < rel_end; rel++)
12631 Elf_Internal_Sym *isym;
12632 struct elf_link_hash_entry *h;
12633 struct elf32_arm_link_hash_entry *eh;
12634 unsigned long r_symndx;
12637 r_symndx = ELF32_R_SYM (rel->r_info);
12638 r_type = ELF32_R_TYPE (rel->r_info);
12639 r_type = arm_real_reloc_type (htab, r_type);
12641 if (r_symndx >= nsyms
12642 /* PR 9934: It is possible to have relocations that do not
12643 refer to symbols, thus it is also possible to have an
12644 object file containing relocations but no symbol table. */
12645 && (r_symndx > STN_UNDEF || nsyms > 0))
12647 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
12656 if (r_symndx < symtab_hdr->sh_info)
12658 /* A local symbol. */
12659 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
12666 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12667 while (h->root.type == bfd_link_hash_indirect
12668 || h->root.type == bfd_link_hash_warning)
12669 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12671 /* PR15323, ref flags aren't set for references in the same object. */
12673 h->root.non_ir_ref = 1;
12677 eh = (struct elf32_arm_link_hash_entry *) h;
12679 call_reloc_p = FALSE;
12680 may_become_dynamic_p = FALSE;
12681 may_need_local_target_p = FALSE;
12683 /* Could be done earlier, if h were already available. */
12684 r_type = elf32_arm_tls_transition (info, r_type, h);
12688 case R_ARM_GOT_PREL:
12689 case R_ARM_TLS_GD32:
12690 case R_ARM_TLS_IE32:
12691 case R_ARM_TLS_GOTDESC:
12692 case R_ARM_TLS_DESCSEQ:
12693 case R_ARM_THM_TLS_DESCSEQ:
12694 case R_ARM_TLS_CALL:
12695 case R_ARM_THM_TLS_CALL:
12696 /* This symbol requires a global offset table entry. */
12698 int tls_type, old_tls_type;
12702 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
12704 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
12706 case R_ARM_TLS_GOTDESC:
12707 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
12708 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
12709 tls_type = GOT_TLS_GDESC; break;
12711 default: tls_type = GOT_NORMAL; break;
12714 if (!info->executable && (tls_type & GOT_TLS_IE))
12715 info->flags |= DF_STATIC_TLS;
12720 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
12724 /* This is a global offset table entry for a local symbol. */
12725 if (!elf32_arm_allocate_local_sym_info (abfd))
12727 elf_local_got_refcounts (abfd)[r_symndx] += 1;
12728 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
12731 /* If a variable is accessed with both tls methods, two
12732 slots may be created. */
12733 if (GOT_TLS_GD_ANY_P (old_tls_type)
12734 && GOT_TLS_GD_ANY_P (tls_type))
12735 tls_type |= old_tls_type;
12737 /* We will already have issued an error message if there
12738 is a TLS/non-TLS mismatch, based on the symbol
12739 type. So just combine any TLS types needed. */
12740 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
12741 && tls_type != GOT_NORMAL)
12742 tls_type |= old_tls_type;
12744 /* If the symbol is accessed in both IE and GDESC
12745 method, we're able to relax. Turn off the GDESC flag,
12746 without disturbing any other kind of TLS type
12747 that may be involved. */
12748 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
12749 tls_type &= ~GOT_TLS_GDESC;
12751 if (old_tls_type != tls_type)
12754 elf32_arm_hash_entry (h)->tls_type = tls_type;
12756 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
12759 /* Fall through. */
12761 case R_ARM_TLS_LDM32:
12762 if (r_type == R_ARM_TLS_LDM32)
12763 htab->tls_ldm_got.refcount++;
12764 /* Fall through. */
12766 case R_ARM_GOTOFF32:
12768 if (htab->root.sgot == NULL
12769 && !create_got_section (htab->root.dynobj, info))
12778 case R_ARM_THM_CALL:
12779 case R_ARM_THM_JUMP24:
12780 case R_ARM_THM_JUMP19:
12781 call_reloc_p = TRUE;
12782 may_need_local_target_p = TRUE;
12786 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
12787 ldr __GOTT_INDEX__ offsets. */
12788 if (!htab->vxworks_p)
12790 may_need_local_target_p = TRUE;
12793 /* Fall through. */
12795 case R_ARM_MOVW_ABS_NC:
12796 case R_ARM_MOVT_ABS:
12797 case R_ARM_THM_MOVW_ABS_NC:
12798 case R_ARM_THM_MOVT_ABS:
12801 (*_bfd_error_handler)
12802 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
12803 abfd, elf32_arm_howto_table_1[r_type].name,
12804 (h) ? h->root.root.string : "a local symbol");
12805 bfd_set_error (bfd_error_bad_value);
12809 /* Fall through. */
12811 case R_ARM_ABS32_NOI:
12812 if (h != NULL && info->executable)
12814 h->pointer_equality_needed = 1;
12816 /* Fall through. */
12818 case R_ARM_REL32_NOI:
12819 case R_ARM_MOVW_PREL_NC:
12820 case R_ARM_MOVT_PREL:
12821 case R_ARM_THM_MOVW_PREL_NC:
12822 case R_ARM_THM_MOVT_PREL:
12824 /* Should the interworking branches be listed here? */
12825 if ((info->shared || htab->root.is_relocatable_executable)
12826 && (sec->flags & SEC_ALLOC) != 0)
12829 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12831 /* In shared libraries and relocatable executables,
12832 we treat local relative references as calls;
12833 see the related SYMBOL_CALLS_LOCAL code in
12834 allocate_dynrelocs. */
12835 call_reloc_p = TRUE;
12836 may_need_local_target_p = TRUE;
12839 /* We are creating a shared library or relocatable
12840 executable, and this is a reloc against a global symbol,
12841 or a non-PC-relative reloc against a local symbol.
12842 We may need to copy the reloc into the output. */
12843 may_become_dynamic_p = TRUE;
12846 may_need_local_target_p = TRUE;
12849 /* This relocation describes the C++ object vtable hierarchy.
12850 Reconstruct it for later use during GC. */
12851 case R_ARM_GNU_VTINHERIT:
12852 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
12856 /* This relocation describes which C++ vtable entries are actually
12857 used. Record for later use during GC. */
12858 case R_ARM_GNU_VTENTRY:
12859 BFD_ASSERT (h != NULL);
12861 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
12869 /* We may need a .plt entry if the function this reloc
12870 refers to is in a different object, regardless of the
12871 symbol's type. We can't tell for sure yet, because
12872 something later might force the symbol local. */
12874 else if (may_need_local_target_p)
12875 /* If this reloc is in a read-only section, we might
12876 need a copy reloc. We can't check reliably at this
12877 stage whether the section is read-only, as input
12878 sections have not yet been mapped to output sections.
12879 Tentatively set the flag for now, and correct in
12880 adjust_dynamic_symbol. */
12881 h->non_got_ref = 1;
12884 if (may_need_local_target_p
12885 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
12887 union gotplt_union *root_plt;
12888 struct arm_plt_info *arm_plt;
12889 struct arm_local_iplt_info *local_iplt;
12893 root_plt = &h->plt;
12894 arm_plt = &eh->plt;
12898 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
12899 if (local_iplt == NULL)
12901 root_plt = &local_iplt->root;
12902 arm_plt = &local_iplt->arm;
12905 /* If the symbol is a function that doesn't bind locally,
12906 this relocation will need a PLT entry. */
12907 if (root_plt->refcount != -1)
12908 root_plt->refcount += 1;
12911 arm_plt->noncall_refcount++;
12913 /* It's too early to use htab->use_blx here, so we have to
12914 record possible blx references separately from
12915 relocs that definitely need a thumb stub. */
12917 if (r_type == R_ARM_THM_CALL)
12918 arm_plt->maybe_thumb_refcount += 1;
12920 if (r_type == R_ARM_THM_JUMP24
12921 || r_type == R_ARM_THM_JUMP19)
12922 arm_plt->thumb_refcount += 1;
12925 if (may_become_dynamic_p)
12927 struct elf_dyn_relocs *p, **head;
12929 /* Create a reloc section in dynobj. */
12930 if (sreloc == NULL)
12932 sreloc = _bfd_elf_make_dynamic_reloc_section
12933 (sec, dynobj, 2, abfd, ! htab->use_rel);
12935 if (sreloc == NULL)
12938 /* BPABI objects never have dynamic relocations mapped. */
12939 if (htab->symbian_p)
12943 flags = bfd_get_section_flags (dynobj, sreloc);
12944 flags &= ~(SEC_LOAD | SEC_ALLOC);
12945 bfd_set_section_flags (dynobj, sreloc, flags);
12949 /* If this is a global symbol, count the number of
12950 relocations we need for this symbol. */
12952 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
12955 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12961 if (p == NULL || p->sec != sec)
12963 bfd_size_type amt = sizeof *p;
12965 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
12975 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
12984 /* Unwinding tables are not referenced directly. This pass marks them as
12985 required if the corresponding code section is marked. */
12988 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
12989 elf_gc_mark_hook_fn gc_mark_hook)
12992 Elf_Internal_Shdr **elf_shdrp;
12995 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
12997 /* Marking EH data may cause additional code sections to be marked,
12998 requiring multiple passes. */
13003 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
13007 if (! is_arm_elf (sub))
13010 elf_shdrp = elf_elfsections (sub);
13011 for (o = sub->sections; o != NULL; o = o->next)
13013 Elf_Internal_Shdr *hdr;
13015 hdr = &elf_section_data (o)->this_hdr;
13016 if (hdr->sh_type == SHT_ARM_EXIDX
13018 && hdr->sh_link < elf_numsections (sub)
13020 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
13023 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
13033 /* Treat mapping symbols as special target symbols. */
13036 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
13038 return bfd_is_arm_special_symbol_name (sym->name,
13039 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
13042 /* This is a copy of elf_find_function() from elf.c except that
13043 ARM mapping symbols are ignored when looking for function names
13044 and STT_ARM_TFUNC is considered to be a function type. */
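/* Mapping symbols are the local symbols named $a, $t and $d (possibly with a
   dot-suffix) that mark ARM code, Thumb code and literal data within a
   section; bfd_is_arm_special_symbol_name is used below to spot them.  */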
13047 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
13048 asection * section,
13049 asymbol ** symbols,
13051 const char ** filename_ptr,
13052 const char ** functionname_ptr)
13054 const char * filename = NULL;
13055 asymbol * func = NULL;
13056 bfd_vma low_func = 0;
13059 for (p = symbols; *p != NULL; p++)
13061 elf_symbol_type *q;
13063 q = (elf_symbol_type *) *p;
13065 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
13070 filename = bfd_asymbol_name (&q->symbol);
13073 case STT_ARM_TFUNC:
13075 /* Skip mapping symbols. */
13076 if ((q->symbol.flags & BSF_LOCAL)
13077 && bfd_is_arm_special_symbol_name (q->symbol.name,
13078 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
13080 /* Fall through. */
13081 if (bfd_get_section (&q->symbol) == section
13082 && q->symbol.value >= low_func
13083 && q->symbol.value <= offset)
13085 func = (asymbol *) q;
13086 low_func = q->symbol.value;
13096 *filename_ptr = filename;
13097 if (functionname_ptr)
13098 *functionname_ptr = bfd_asymbol_name (func);
13104 /* Find the nearest line to a particular section and offset, for error
13105 reporting. This code is a duplicate of the code in elf.c, except
13106 that it uses arm_elf_find_function. */
13109 elf32_arm_find_nearest_line (bfd * abfd,
13110 asection * section,
13111 asymbol ** symbols,
13113 const char ** filename_ptr,
13114 const char ** functionname_ptr,
13115 unsigned int * line_ptr)
13117 bfd_boolean found = FALSE;
13119 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
13121 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
13122 section, symbols, offset,
13123 filename_ptr, functionname_ptr,
13125 & elf_tdata (abfd)->dwarf2_find_line_info))
13127 if (!*functionname_ptr)
13128 arm_elf_find_function (abfd, section, symbols, offset,
13129 *filename_ptr ? NULL : filename_ptr,
13135 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
13136 & found, filename_ptr,
13137 functionname_ptr, line_ptr,
13138 & elf_tdata (abfd)->line_info))
13141 if (found && (*functionname_ptr || *line_ptr))
13144 if (symbols == NULL)
13147 if (! arm_elf_find_function (abfd, section, symbols, offset,
13148 filename_ptr, functionname_ptr))
13156 elf32_arm_find_inliner_info (bfd * abfd,
13157 const char ** filename_ptr,
13158 const char ** functionname_ptr,
13159 unsigned int * line_ptr)
13162 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
13163 functionname_ptr, line_ptr,
13164 & elf_tdata (abfd)->dwarf2_find_line_info);
13168 /* Adjust a symbol defined by a dynamic object and referenced by a
13169 regular object. The current definition is in some section of the
13170 dynamic object, but we're not including those sections. We have to
13171 change the definition to something the rest of the link can understand. */
13175 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
13176 struct elf_link_hash_entry * h)
13180 struct elf32_arm_link_hash_entry * eh;
13181 struct elf32_arm_link_hash_table *globals;
13183 globals = elf32_arm_hash_table (info);
13184 if (globals == NULL)
13187 dynobj = elf_hash_table (info)->dynobj;
13189 /* Make sure we know what is going on here. */
13190 BFD_ASSERT (dynobj != NULL
13192 || h->type == STT_GNU_IFUNC
13193 || h->u.weakdef != NULL
13196 && !h->def_regular)));
13198 eh = (struct elf32_arm_link_hash_entry *) h;
13200 /* If this is a function, put it in the procedure linkage table. We
13201 will fill in the contents of the procedure linkage table later,
13202 when we know the address of the .got section. */
13203 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
13205 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
13206 symbol binds locally. */
13207 if (h->plt.refcount <= 0
13208 || (h->type != STT_GNU_IFUNC
13209 && (SYMBOL_CALLS_LOCAL (info, h)
13210 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
13211 && h->root.type == bfd_link_hash_undefweak))))
13213 /* This case can occur if we saw a PLT32 reloc in an input
13214 file, but the symbol was never referred to by a dynamic
13215 object, or if all references were garbage collected. In
13216 such a case, we don't actually need to build a procedure
13217 linkage table, and we can just do a PC24 reloc instead. */
13218 h->plt.offset = (bfd_vma) -1;
13219 eh->plt.thumb_refcount = 0;
13220 eh->plt.maybe_thumb_refcount = 0;
13221 eh->plt.noncall_refcount = 0;
13229 /* It's possible that we incorrectly decided a .plt reloc was
13230 needed for an R_ARM_PC24 or similar reloc to a non-function sym
13231 in check_relocs. We can't decide accurately between function
13232 and non-function syms in check_relocs; objects loaded later in
13233 the link may change h->type. So fix it now. */
13234 h->plt.offset = (bfd_vma) -1;
13235 eh->plt.thumb_refcount = 0;
13236 eh->plt.maybe_thumb_refcount = 0;
13237 eh->plt.noncall_refcount = 0;
13240 /* If this is a weak symbol, and there is a real definition, the
13241 processor independent code will have arranged for us to see the
13242 real definition first, and we can just use the same value. */
13243 if (h->u.weakdef != NULL)
13245 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
13246 || h->u.weakdef->root.type == bfd_link_hash_defweak);
13247 h->root.u.def.section = h->u.weakdef->root.u.def.section;
13248 h->root.u.def.value = h->u.weakdef->root.u.def.value;
13252 /* If there are no non-GOT references, we do not need a copy reloc. */
13254 if (!h->non_got_ref)
13257 /* This is a reference to a symbol defined by a dynamic object which
13258 is not a function. */
13260 /* If we are creating a shared library, we must presume that the
13261 only references to the symbol are via the global offset table.
13262 For such cases we need not do anything here; the relocations will
13263 be handled correctly by relocate_section. Relocatable executables
13264 can reference data in shared objects directly, so we don't need to
13265 do anything here. */
13266 if (info->shared || globals->root.is_relocatable_executable)
13269 /* We must allocate the symbol in our .dynbss section, which will
13270 become part of the .bss section of the executable. There will be
13271 an entry for this symbol in the .dynsym section. The dynamic
13272 object will contain position independent code, so all references
13273 from the dynamic object to this symbol will go through the global
13274 offset table. The dynamic linker will use the .dynsym entry to
13275 determine the address it must put in the global offset table, so
13276 both the dynamic object and the regular object will refer to the
13277 same memory location for the variable. */
13278 s = bfd_get_linker_section (dynobj, ".dynbss");
13279 BFD_ASSERT (s != NULL);
13281 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
13282 copy the initial value out of the dynamic object and into the
13283 runtime process image. We need to remember the offset into the
13284 .rel(a).bss section we are going to use. */
13285 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
13289 srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
13290 elf32_arm_allocate_dynrelocs (info, srel, 1);
13294 return _bfd_elf_adjust_dynamic_copy (h, s);
13297 /* Allocate space in .plt, .got and associated reloc sections for dynamic relocs. */
13301 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13303 struct bfd_link_info *info;
13304 struct elf32_arm_link_hash_table *htab;
13305 struct elf32_arm_link_hash_entry *eh;
13306 struct elf_dyn_relocs *p;
13308 if (h->root.type == bfd_link_hash_indirect)
13311 eh = (struct elf32_arm_link_hash_entry *) h;
13313 info = (struct bfd_link_info *) inf;
13314 htab = elf32_arm_hash_table (info);
13318 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13319 && h->plt.refcount > 0)
13321 /* Make sure this symbol is output as a dynamic symbol.
13322 Undefined weak syms won't yet be marked as dynamic. */
13323 if (h->dynindx == -1
13324 && !h->forced_local)
13326 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13330 /* If the call in the PLT entry binds locally, the associated
13331 GOT entry should use an R_ARM_IRELATIVE relocation instead of
13332 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
13333 than the .plt section. */
13334 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
13337 if (eh->plt.noncall_refcount == 0
13338 && SYMBOL_REFERENCES_LOCAL (info, h))
13339 /* All non-call references can be resolved directly.
13340 This means that they can (and in some cases, must)
13341 resolve directly to the run-time target, rather than
13342 to the PLT. That in turn means that any .got entry
13343 would be equal to the .igot.plt entry, so there's
13344 no point having both. */
13345 h->got.refcount = 0;
13350 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
13352 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
13354 /* If this symbol is not defined in a regular file, and we are
13355 not generating a shared library, then set the symbol to this
13356 location in the .plt. This is required to make function
13357 pointers compare as equal between the normal executable and
13358 the shared library. */
13360 && !h->def_regular)
13362 h->root.u.def.section = htab->root.splt;
13363 h->root.u.def.value = h->plt.offset;
13365 /* Make sure the function is not marked as Thumb, in case
13366 it is the target of an ABS32 relocation, which will
13367 point to the PLT entry. */
13368 h->target_internal = ST_BRANCH_TO_ARM;
13371 /* VxWorks executables have a second set of relocations for
13372 each PLT entry. They go in a separate relocation section,
13373 which is processed by the kernel loader. */
13374 if (htab->vxworks_p && !info->shared)
13376 /* There is a relocation for the initial PLT entry:
13377 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
13378 if (h->plt.offset == htab->plt_header_size)
13379 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
13381 /* There are two extra relocations for each subsequent
13382 PLT entry: an R_ARM_32 relocation for the GOT entry,
13383 and an R_ARM_32 relocation for the PLT entry. */
13384 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
13389 h->plt.offset = (bfd_vma) -1;
13395 h->plt.offset = (bfd_vma) -1;
13399 eh = (struct elf32_arm_link_hash_entry *) h;
13400 eh->tlsdesc_got = (bfd_vma) -1;
13402 if (h->got.refcount > 0)
13406 int tls_type = elf32_arm_hash_entry (h)->tls_type;
13409 /* Make sure this symbol is output as a dynamic symbol.
13410 Undefined weak syms won't yet be marked as dynamic. */
13411 if (h->dynindx == -1
13412 && !h->forced_local)
13414 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13418 if (!htab->symbian_p)
13420 s = htab->root.sgot;
13421 h->got.offset = s->size;
13423 if (tls_type == GOT_UNKNOWN)
13426 if (tls_type == GOT_NORMAL)
13427 /* Non-TLS symbols need one GOT slot. */
13431 if (tls_type & GOT_TLS_GDESC)
13433 /* R_ARM_TLS_DESC needs 2 GOT slots. */
13435 = (htab->root.sgotplt->size
13436 - elf32_arm_compute_jump_table_size (htab));
13437 htab->root.sgotplt->size += 8;
13438 h->got.offset = (bfd_vma) -2;
13439 /* plt.got_offset needs to know there's a TLS_DESC
13440 reloc in the middle of .got.plt. */
13441 htab->num_tls_desc++;
13444 if (tls_type & GOT_TLS_GD)
13446 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
13447 the symbol is both GD and GDESC, got.offset may
13448 have been overwritten. */
13449 h->got.offset = s->size;
13453 if (tls_type & GOT_TLS_IE)
13454 /* R_ARM_TLS_IE32 needs one GOT slot. */
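/* To recap the slot accounting above: an ordinary (GOT_NORMAL) reference
   reserves a single GOT word; R_ARM_TLS_GD32 reserves two consecutive
   .got words (module index and offset); R_ARM_TLS_DESC reserves two
   words in .got.plt, which is why it is tracked against sgotplt->size
   rather than s->size; and R_ARM_TLS_IE32 reserves one .got word.
   A symbol referenced through both GD and GDESC therefore ends up with
   two independent homes, which is why got.offset can be rewritten
   above. */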
13458 dyn = htab->root.dynamic_sections_created;
13461 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
13463 || !SYMBOL_REFERENCES_LOCAL (info, h)))
13466 if (tls_type != GOT_NORMAL
13467 && (info->shared || indx != 0)
13468 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13469 || h->root.type != bfd_link_hash_undefweak))
13471 if (tls_type & GOT_TLS_IE)
13472 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13474 if (tls_type & GOT_TLS_GD)
13475 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13477 if (tls_type & GOT_TLS_GDESC)
13479 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13480 /* GDESC needs a trampoline to jump to. */
13481 htab->tls_trampoline = -1;
13484 /* Only GD needs it. GDESC just emits one relocation per slot. */
13486 if ((tls_type & GOT_TLS_GD) && indx != 0)
13487 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13489 else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
13491 if (htab->root.dynamic_sections_created)
13492 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
13493 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13495 else if (h->type == STT_GNU_IFUNC
13496 && eh->plt.noncall_refcount == 0)
13497 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
13498 they all resolve dynamically instead. Reserve room for the
13499 GOT entry's R_ARM_IRELATIVE relocation. */
13500 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
13501 else if (info->shared && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13502 || h->root.type != bfd_link_hash_undefweak))
13503 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
13504 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13508 h->got.offset = (bfd_vma) -1;
13510 /* Allocate stubs for exported Thumb functions on v4t. */
13511 if (!htab->use_blx && h->dynindx != -1
13513 && h->target_internal == ST_BRANCH_TO_THUMB
13514 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
13516 struct elf_link_hash_entry * th;
13517 struct bfd_link_hash_entry * bh;
13518 struct elf_link_hash_entry * myh;
13522 /* Create a new symbol to register the real location of the function. */
13523 s = h->root.u.def.section;
13524 sprintf (name, "__real_%s", h->root.root.string);
13525 _bfd_generic_link_add_one_symbol (info, s->owner,
13526 name, BSF_GLOBAL, s,
13527 h->root.u.def.value,
13528 NULL, TRUE, FALSE, &bh);
13530 myh = (struct elf_link_hash_entry *) bh;
13531 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13532 myh->forced_local = 1;
13533 myh->target_internal = ST_BRANCH_TO_THUMB;
13534 eh->export_glue = myh;
13535 th = record_arm_to_thumb_glue (info, h);
13536 /* Point the symbol at the stub. */
13537 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13538 h->target_internal = ST_BRANCH_TO_ARM;
13539 h->root.u.def.section = th->root.u.def.section;
13540 h->root.u.def.value = th->root.u.def.value & ~1;
13543 if (eh->dyn_relocs == NULL)
13546 /* In the shared -Bsymbolic case, discard space allocated for
13547 dynamic pc-relative relocs against symbols which turn out to be
13548 defined in regular objects. For the normal shared case, discard
13549 space for pc-relative relocs that have become local due to symbol
13550 visibility changes. */
13552 if (info->shared || htab->root.is_relocatable_executable)
13554 /* The only relocs that use pc_count are R_ARM_REL32 and
13555 R_ARM_REL32_NOI, which will appear on something like
13556 ".long foo - .". We want calls to protected symbols to resolve
13557 directly to the function rather than going via the plt. If people
13558 want function pointer comparisons to work as expected then they
13559 should avoid writing assembly like ".long foo - .". */
13560 if (SYMBOL_CALLS_LOCAL (info, h))
13562 struct elf_dyn_relocs **pp;
13564 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13566 p->count -= p->pc_count;
13575 if (htab->vxworks_p)
13577 struct elf_dyn_relocs **pp;
13579 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13581 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
13588 /* Also discard relocs on undefined weak syms with non-default visibility. */
13590 if (eh->dyn_relocs != NULL
13591 && h->root.type == bfd_link_hash_undefweak)
13593 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
13594 eh->dyn_relocs = NULL;
13596 /* Make sure undefined weak symbols are output as a dynamic symbol in PIEs. */
13598 else if (h->dynindx == -1
13599 && !h->forced_local)
13601 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13606 else if (htab->root.is_relocatable_executable && h->dynindx == -1
13607 && h->root.type == bfd_link_hash_new)
13609 /* Output absolute symbols so that we can create relocations
13610 against them. For normal symbols we output a relocation
13611 against the section that contains them. */
13612 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13619 /* For the non-shared case, discard space for relocs against
13620 symbols which turn out to need copy relocs or are not dynamic. */
13623 if (!h->non_got_ref
13624 && ((h->def_dynamic
13625 && !h->def_regular)
13626 || (htab->root.dynamic_sections_created
13627 && (h->root.type == bfd_link_hash_undefweak
13628 || h->root.type == bfd_link_hash_undefined))))
13630 /* Make sure this symbol is output as a dynamic symbol.
13631 Undefined weak syms won't yet be marked as dynamic. */
13632 if (h->dynindx == -1
13633 && !h->forced_local)
13635 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13639 /* If that succeeded, we know we'll be keeping all the relocs. */
13641 if (h->dynindx != -1)
13645 eh->dyn_relocs = NULL;
13650 /* Finally, allocate space. */
13651 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13653 asection *sreloc = elf_section_data (p->sec)->sreloc;
13654 if (h->type == STT_GNU_IFUNC
13655 && eh->plt.noncall_refcount == 0
13656 && SYMBOL_REFERENCES_LOCAL (info, h))
13657 elf32_arm_allocate_irelocs (info, sreloc, p->count);
13659 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
13665 /* Find any dynamic relocs that apply to read-only sections. */
13668 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
13670 struct elf32_arm_link_hash_entry * eh;
13671 struct elf_dyn_relocs * p;
13673 eh = (struct elf32_arm_link_hash_entry *) h;
13674 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13676 asection *s = p->sec;
13678 if (s != NULL && (s->flags & SEC_READONLY) != 0)
13680 struct bfd_link_info *info = (struct bfd_link_info *) inf;
13682 info->flags |= DF_TEXTREL;
13684 /* Not an error, just cut short the traversal. */
13692 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
13695 struct elf32_arm_link_hash_table *globals;
13697 globals = elf32_arm_hash_table (info);
13698 if (globals == NULL)
13701 globals->byteswap_code = byteswap_code;
13704 /* Set the sizes of the dynamic sections. */
13707 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
13708 struct bfd_link_info * info)
13713 bfd_boolean relocs;
13715 struct elf32_arm_link_hash_table *htab;
13717 htab = elf32_arm_hash_table (info);
13721 dynobj = elf_hash_table (info)->dynobj;
13722 BFD_ASSERT (dynobj != NULL);
13723 check_use_blx (htab);
13725 if (elf_hash_table (info)->dynamic_sections_created)
13727 /* Set the contents of the .interp section to the interpreter. */
13728 if (info->executable)
13730 s = bfd_get_linker_section (dynobj, ".interp");
13731 BFD_ASSERT (s != NULL);
13732 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
13733 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
13737 /* Set up .got offsets for local syms, and space for local dynamic relocs. */
13739 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
13741 bfd_signed_vma *local_got;
13742 bfd_signed_vma *end_local_got;
13743 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
13744 char *local_tls_type;
13745 bfd_vma *local_tlsdesc_gotent;
13746 bfd_size_type locsymcount;
13747 Elf_Internal_Shdr *symtab_hdr;
13749 bfd_boolean is_vxworks = htab->vxworks_p;
13750 unsigned int symndx;
13752 if (! is_arm_elf (ibfd))
13755 for (s = ibfd->sections; s != NULL; s = s->next)
13757 struct elf_dyn_relocs *p;
13759 for (p = (struct elf_dyn_relocs *)
13760 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
13762 if (!bfd_is_abs_section (p->sec)
13763 && bfd_is_abs_section (p->sec->output_section))
13765 /* Input section has been discarded, either because
13766 it is a copy of a linkonce section or due to
13767 linker script /DISCARD/, so we'll be discarding the relocations too. */
13770 else if (is_vxworks
13771 && strcmp (p->sec->output_section->name,
13774 /* Relocations in vxworks .tls_vars sections are
13775 handled specially by the loader. */
13777 else if (p->count != 0)
13779 srel = elf_section_data (p->sec)->sreloc;
13780 elf32_arm_allocate_dynrelocs (info, srel, p->count);
13781 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
13782 info->flags |= DF_TEXTREL;
13787 local_got = elf_local_got_refcounts (ibfd);
13791 symtab_hdr = & elf_symtab_hdr (ibfd);
13792 locsymcount = symtab_hdr->sh_info;
13793 end_local_got = local_got + locsymcount;
13794 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
13795 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
13796 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
13798 s = htab->root.sgot;
13799 srel = htab->root.srelgot;
13800 for (; local_got < end_local_got;
13801 ++local_got, ++local_iplt_ptr, ++local_tls_type,
13802 ++local_tlsdesc_gotent, ++symndx)
13804 *local_tlsdesc_gotent = (bfd_vma) -1;
13805 local_iplt = *local_iplt_ptr;
13806 if (local_iplt != NULL)
13808 struct elf_dyn_relocs *p;
13810 if (local_iplt->root.refcount > 0)
13812 elf32_arm_allocate_plt_entry (info, TRUE,
13815 if (local_iplt->arm.noncall_refcount == 0)
13816 /* All references to the PLT are calls, so all
13817 non-call references can resolve directly to the
13818 run-time target. This means that the .got entry
13819 would be the same as the .igot.plt entry, so there's
13820 no point creating both. */
13825 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
13826 local_iplt->root.offset = (bfd_vma) -1;
13829 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
13833 psrel = elf_section_data (p->sec)->sreloc;
13834 if (local_iplt->arm.noncall_refcount == 0)
13835 elf32_arm_allocate_irelocs (info, psrel, p->count);
13837 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
13840 if (*local_got > 0)
13842 Elf_Internal_Sym *isym;
13844 *local_got = s->size;
13845 if (*local_tls_type & GOT_TLS_GD)
13846 /* TLS_GD relocs need an 8-byte structure in the GOT. */
13848 if (*local_tls_type & GOT_TLS_GDESC)
13850 *local_tlsdesc_gotent = htab->root.sgotplt->size
13851 - elf32_arm_compute_jump_table_size (htab);
13852 htab->root.sgotplt->size += 8;
13853 *local_got = (bfd_vma) -2;
13854 /* plt.got_offset needs to know there's a TLS_DESC
13855 reloc in the middle of .got.plt. */
13856 htab->num_tls_desc++;
13858 if (*local_tls_type & GOT_TLS_IE)
13861 if (*local_tls_type & GOT_NORMAL)
13863 /* If the symbol is both GD and GDESC, *local_got
13864 may have been overwritten. */
13865 *local_got = s->size;
13869 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
13873 /* If all references to an STT_GNU_IFUNC PLT are calls,
13874 then all non-call references, including this GOT entry,
13875 resolve directly to the run-time target. */
13876 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
13877 && (local_iplt == NULL
13878 || local_iplt->arm.noncall_refcount == 0))
13879 elf32_arm_allocate_irelocs (info, srel, 1);
13880 else if (info->shared || output_bfd->flags & DYNAMIC)
13882 if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
13883 || *local_tls_type & GOT_TLS_GD)
13884 elf32_arm_allocate_dynrelocs (info, srel, 1);
13886 if (info->shared && *local_tls_type & GOT_TLS_GDESC)
13888 elf32_arm_allocate_dynrelocs (info,
13889 htab->root.srelplt, 1);
13890 htab->tls_trampoline = -1;
13895 *local_got = (bfd_vma) -1;
13899 if (htab->tls_ldm_got.refcount > 0)
13901 /* Allocate two GOT entries and one dynamic relocation (if necessary)
13902 for R_ARM_TLS_LDM32 relocations. */
13903 htab->tls_ldm_got.offset = htab->root.sgot->size;
13904 htab->root.sgot->size += 8;
13906 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13909 htab->tls_ldm_got.offset = -1;
13911 /* Allocate global sym .plt and .got entries, and space for global
13912 sym dynamic relocs. */
13913 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
13915 /* Here we rummage through the found bfds to collect glue information. */
13916 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
13918 if (! is_arm_elf (ibfd))
13921 /* Initialise mapping tables for code/data. */
13922 bfd_elf32_arm_init_maps (ibfd);
13924 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
13925 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
13926 /* xgettext:c-format */
13927 _bfd_error_handler (_("Errors encountered processing file %s"),
13931 /* Allocate space for the glue sections now that we've sized them. */
13932 bfd_elf32_arm_allocate_interworking_sections (info);
13934 /* For every jump slot reserved in the sgotplt, reloc_count is
13935 incremented. However, when we reserve space for TLS descriptors,
13936 it's not incremented, so in order to compute the space reserved
13937 for them, it suffices to multiply the reloc count by the jump slot size. */
13939 if (htab->root.srelplt)
13940 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
13942 if (htab->tls_trampoline)
13944 if (htab->root.splt->size == 0)
13945 htab->root.splt->size += htab->plt_header_size;
13947 htab->tls_trampoline = htab->root.splt->size;
13948 htab->root.splt->size += htab->plt_entry_size;
13950 /* If we're not using lazy TLS relocations, don't generate the
13951 PLT and GOT entries they require. */
13952 if (!(info->flags & DF_BIND_NOW))
13954 htab->dt_tlsdesc_got = htab->root.sgot->size;
13955 htab->root.sgot->size += 4;
13957 htab->dt_tlsdesc_plt = htab->root.splt->size;
13958 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
13962 /* The check_relocs and adjust_dynamic_symbol entry points have
13963 determined the sizes of the various dynamic sections. Allocate
13964 memory for them. */
13967 for (s = dynobj->sections; s != NULL; s = s->next)
13971 if ((s->flags & SEC_LINKER_CREATED) == 0)
13974 /* It's OK to base decisions on the section name, because none
13975 of the dynobj section names depend upon the input files. */
13976 name = bfd_get_section_name (dynobj, s);
13978 if (s == htab->root.splt)
13980 /* Remember whether there is a PLT. */
13981 plt = s->size != 0;
13983 else if (CONST_STRNEQ (name, ".rel"))
13987 /* Remember whether there are any reloc sections other
13988 than .rel(a).plt and .rela.plt.unloaded. */
13989 if (s != htab->root.srelplt && s != htab->srelplt2)
13992 /* We use the reloc_count field as a counter if we need
13993 to copy relocs into the output file. */
13994 s->reloc_count = 0;
13997 else if (s != htab->root.sgot
13998 && s != htab->root.sgotplt
13999 && s != htab->root.iplt
14000 && s != htab->root.igotplt
14001 && s != htab->sdynbss)
14003 /* It's not one of our sections, so don't allocate space. */
14009 /* If we don't need this section, strip it from the
14010 output file. This is mostly to handle .rel(a).bss and
14011 .rel(a).plt. We must create both sections in
14012 create_dynamic_sections, because they must be created
14013 before the linker maps input sections to output
14014 sections. The linker does that before
14015 adjust_dynamic_symbol is called, and it is that
14016 function which decides whether anything needs to go
14017 into these sections. */
14018 s->flags |= SEC_EXCLUDE;
14022 if ((s->flags & SEC_HAS_CONTENTS) == 0)
14025 /* Allocate memory for the section contents. */
14026 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
14027 if (s->contents == NULL)
14031 if (elf_hash_table (info)->dynamic_sections_created)
14033 /* Add some entries to the .dynamic section. We fill in the
14034 values later, in elf32_arm_finish_dynamic_sections, but we
14035 must add the entries now so that we get the correct size for
14036 the .dynamic section. The DT_DEBUG entry is filled in by the
14037 dynamic linker and used by the debugger. */
14038 #define add_dynamic_entry(TAG, VAL) \
14039 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
14041 if (info->executable)
14043 if (!add_dynamic_entry (DT_DEBUG, 0))
14049 if ( !add_dynamic_entry (DT_PLTGOT, 0)
14050 || !add_dynamic_entry (DT_PLTRELSZ, 0)
14051 || !add_dynamic_entry (DT_PLTREL,
14052 htab->use_rel ? DT_REL : DT_RELA)
14053 || !add_dynamic_entry (DT_JMPREL, 0))
14056 if (htab->dt_tlsdesc_plt &&
14057 (!add_dynamic_entry (DT_TLSDESC_PLT,0)
14058 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
14066 if (!add_dynamic_entry (DT_REL, 0)
14067 || !add_dynamic_entry (DT_RELSZ, 0)
14068 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
14073 if (!add_dynamic_entry (DT_RELA, 0)
14074 || !add_dynamic_entry (DT_RELASZ, 0)
14075 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
14080 /* If any dynamic relocs apply to a read-only section,
14081 then we need a DT_TEXTREL entry. */
14082 if ((info->flags & DF_TEXTREL) == 0)
14083 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
14086 if ((info->flags & DF_TEXTREL) != 0)
14088 if (!add_dynamic_entry (DT_TEXTREL, 0))
14091 if (htab->vxworks_p
14092 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
14095 #undef add_dynamic_entry
14100 /* Size sections even though they're not dynamic. We use it to setup
14101 _TLS_MODULE_BASE_, if needed. */
14104 elf32_arm_always_size_sections (bfd *output_bfd,
14105 struct bfd_link_info *info)
14109 if (info->relocatable)
14112 tls_sec = elf_hash_table (info)->tls_sec;
14116 struct elf_link_hash_entry *tlsbase;
14118 tlsbase = elf_link_hash_lookup
14119 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
14123 struct bfd_link_hash_entry *bh = NULL;
14124 const struct elf_backend_data *bed
14125 = get_elf_backend_data (output_bfd);
14127 if (!(_bfd_generic_link_add_one_symbol
14128 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
14129 tls_sec, 0, NULL, FALSE,
14130 bed->collect, &bh)))
14133 tlsbase->type = STT_TLS;
14134 tlsbase = (struct elf_link_hash_entry *)bh;
14135 tlsbase->def_regular = 1;
14136 tlsbase->other = STV_HIDDEN;
14137 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
14143 /* Finish up dynamic symbol handling. We set the contents of various
14144 dynamic sections here. */
14147 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
14148 struct bfd_link_info * info,
14149 struct elf_link_hash_entry * h,
14150 Elf_Internal_Sym * sym)
14152 struct elf32_arm_link_hash_table *htab;
14153 struct elf32_arm_link_hash_entry *eh;
14155 htab = elf32_arm_hash_table (info);
14159 eh = (struct elf32_arm_link_hash_entry *) h;
14161 if (h->plt.offset != (bfd_vma) -1)
14165 BFD_ASSERT (h->dynindx != -1);
14166 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
14171 if (!h->def_regular)
14173 /* Mark the symbol as undefined, rather than as defined in
14174 the .plt section. Leave the value alone. */
14175 sym->st_shndx = SHN_UNDEF;
14176 /* If the symbol is weak, we do need to clear the value.
14177 Otherwise, the PLT entry would provide a definition for
14178 the symbol even if the symbol wasn't defined anywhere,
14179 and so the symbol would never be NULL. */
14180 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
14183 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
14185 /* At least one non-call relocation references this .iplt entry,
14186 so the .iplt entry is the function's canonical address. */
14187 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
14188 sym->st_target_internal = ST_BRANCH_TO_ARM;
14189 sym->st_shndx = (_bfd_elf_section_from_bfd_section
14190 (output_bfd, htab->root.iplt->output_section));
14191 sym->st_value = (h->plt.offset
14192 + htab->root.iplt->output_section->vma
14193 + htab->root.iplt->output_offset);
14200 Elf_Internal_Rela rel;
14202 /* This symbol needs a copy reloc. Set it up. */
14203 BFD_ASSERT (h->dynindx != -1
14204 && (h->root.type == bfd_link_hash_defined
14205 || h->root.type == bfd_link_hash_defweak));
14208 BFD_ASSERT (s != NULL);
14211 rel.r_offset = (h->root.u.def.value
14212 + h->root.u.def.section->output_section->vma
14213 + h->root.u.def.section->output_offset);
14214 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
14215 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
14218 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
14219 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
14220 to the ".got" section. */
14221 if (h == htab->root.hdynamic
14222 || (!htab->vxworks_p && h == htab->root.hgot))
14223 sym->st_shndx = SHN_ABS;
14229 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14231 const unsigned long *template, unsigned count)
14235 for (ix = 0; ix != count; ix++)
14237 unsigned long insn = template[ix];
14239 /* Emit mov pc,rx if bx is not permitted. */
14240 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
14241 insn = (insn & 0xf000000f) | 0x01a0f000;
14242 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
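/* As a concrete example of the fix-up above: "bx lr" is 0xe12fff1e.
   Masking with 0xf000000f keeps the condition field and the register
   (0xe000000e), and or-ing in 0x01a0f000 produces 0xe1a0f00e, i.e.
   "mov pc, lr". The condition and target register are preserved; only
   the interworking form of the jump is replaced, for ARMv4 cores that
   lack BX. */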
14246 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
14247 other variants, NaCl needs this entry in a static executable's
14248 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
14249 zero. For .iplt really only the last bundle is useful, and .iplt
14250 could have a shorter first entry, with each individual PLT entry's
14251 relative branch calculated differently so it targets the last
14252 bundle instead of the instruction before it (labelled .Lplt_tail
14253 above). But it's simpler to keep the size and layout of PLT0
14254 consistent with the dynamic case, at the cost of some dead code at
14255 the start of .iplt and the one dead store to the stack at the start
14258 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14259 asection *plt, bfd_vma got_displacement)
14263 put_arm_insn (htab, output_bfd,
14264 elf32_arm_nacl_plt0_entry[0]
14265 | arm_movw_immediate (got_displacement),
14266 plt->contents + 0);
14267 put_arm_insn (htab, output_bfd,
14268 elf32_arm_nacl_plt0_entry[1]
14269 | arm_movt_immediate (got_displacement),
14270 plt->contents + 4);
14272 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
14273 put_arm_insn (htab, output_bfd,
14274 elf32_arm_nacl_plt0_entry[i],
14275 plt->contents + (i * 4));
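/* The first two template words materialise GOT_DISPLACEMENT as a
   32-bit constant: arm_movw_immediate scatters its low 16 bits into
   the MOVW's split imm16 fields and arm_movt_immediate does the same
   for the high 16 bits in the MOVT. The remaining template words are
   emitted unchanged. */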
14278 /* Finish up the dynamic sections. */
14281 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
14286 struct elf32_arm_link_hash_table *htab;
14288 htab = elf32_arm_hash_table (info);
14292 dynobj = elf_hash_table (info)->dynobj;
14294 sgot = htab->root.sgotplt;
14295 /* A broken linker script might have discarded the dynamic sections.
14296 Catch this here so that we do not seg-fault later on. */
14297 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
14299 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
14301 if (elf_hash_table (info)->dynamic_sections_created)
14304 Elf32_External_Dyn *dyncon, *dynconend;
14306 splt = htab->root.splt;
14307 BFD_ASSERT (splt != NULL && sdyn != NULL);
14308 BFD_ASSERT (htab->symbian_p || sgot != NULL);
14310 dyncon = (Elf32_External_Dyn *) sdyn->contents;
14311 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
14313 for (; dyncon < dynconend; dyncon++)
14315 Elf_Internal_Dyn dyn;
14319 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
14326 if (htab->vxworks_p
14327 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
14328 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14333 goto get_vma_if_bpabi;
14336 goto get_vma_if_bpabi;
14339 goto get_vma_if_bpabi;
14341 name = ".gnu.version";
14342 goto get_vma_if_bpabi;
14344 name = ".gnu.version_d";
14345 goto get_vma_if_bpabi;
14347 name = ".gnu.version_r";
14348 goto get_vma_if_bpabi;
14354 name = RELOC_SECTION (htab, ".plt");
14356 s = bfd_get_section_by_name (output_bfd, name);
14359 /* PR ld/14397: Issue an error message if a required section is missing. */
14360 (*_bfd_error_handler)
14361 (_("error: required section '%s' not found in the linker script"), name);
14362 bfd_set_error (bfd_error_invalid_operation);
14365 if (!htab->symbian_p)
14366 dyn.d_un.d_ptr = s->vma;
14368 /* In the BPABI, tags in the PT_DYNAMIC section point
14369 at the file offset, not the memory address, for the
14370 convenience of the post linker. */
14371 dyn.d_un.d_ptr = s->filepos;
14372 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14376 if (htab->symbian_p)
14381 s = htab->root.srelplt;
14382 BFD_ASSERT (s != NULL);
14383 dyn.d_un.d_val = s->size;
14384 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14389 if (!htab->symbian_p)
14391 /* My reading of the SVR4 ABI indicates that the
14392 procedure linkage table relocs (DT_JMPREL) should be
14393 included in the overall relocs (DT_REL). This is
14394 what Solaris does. However, UnixWare can not handle
14395 that case. Therefore, we override the DT_RELSZ entry
14396 here to make it not include the JMPREL relocs. Since
14397 the linker script arranges for .rel(a).plt to follow all
14398 other relocation sections, we don't have to worry
14399 about changing the DT_REL entry. */
14400 s = htab->root.srelplt;
14402 dyn.d_un.d_val -= s->size;
14403 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14406 /* Fall through. */
14410 /* In the BPABI, the DT_REL tag must point at the file
14411 offset, not the VMA, of the first relocation
14412 section. So, we use code similar to that in
14413 elflink.c, but do not check for SHF_ALLOC on the
14414 relocation section, since relocation sections are
14415 never allocated under the BPABI. The comments above
14416 about UnixWare notwithstanding, we include all of the
14417 relocations here. */
14418 if (htab->symbian_p)
14421 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
14422 ? SHT_REL : SHT_RELA);
14423 dyn.d_un.d_val = 0;
14424 for (i = 1; i < elf_numsections (output_bfd); i++)
14426 Elf_Internal_Shdr *hdr
14427 = elf_elfsections (output_bfd)[i];
14428 if (hdr->sh_type == type)
14430 if (dyn.d_tag == DT_RELSZ
14431 || dyn.d_tag == DT_RELASZ)
14432 dyn.d_un.d_val += hdr->sh_size;
14433 else if ((ufile_ptr) hdr->sh_offset
14434 <= dyn.d_un.d_val - 1)
14435 dyn.d_un.d_val = hdr->sh_offset;
14438 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14442 case DT_TLSDESC_PLT:
14443 s = htab->root.splt;
14444 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14445 + htab->dt_tlsdesc_plt);
14446 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14449 case DT_TLSDESC_GOT:
14450 s = htab->root.sgot;
14451 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14452 + htab->dt_tlsdesc_got);
14453 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14456 /* Set the bottom bit of DT_INIT/FINI if the
14457 corresponding function is Thumb. */
14459 name = info->init_function;
14462 name = info->fini_function;
14464 /* If it wasn't set by elf_bfd_final_link
14465 then there is nothing to adjust. */
14466 if (dyn.d_un.d_val != 0)
14468 struct elf_link_hash_entry * eh;
14470 eh = elf_link_hash_lookup (elf_hash_table (info), name,
14471 FALSE, FALSE, TRUE);
14472 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
14474 dyn.d_un.d_val |= 1;
14475 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14482 /* Fill in the first entry in the procedure linkage table. */
14483 if (splt->size > 0 && htab->plt_header_size)
14485 const bfd_vma *plt0_entry;
14486 bfd_vma got_address, plt_address, got_displacement;
14488 /* Calculate the addresses of the GOT and PLT. */
14489 got_address = sgot->output_section->vma + sgot->output_offset;
14490 plt_address = splt->output_section->vma + splt->output_offset;
14492 if (htab->vxworks_p)
14494 /* The VxWorks GOT is relocated by the dynamic linker.
14495 Therefore, we must emit relocations rather than simply
14496 computing the values now. */
14497 Elf_Internal_Rela rel;
14499 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
14500 put_arm_insn (htab, output_bfd, plt0_entry[0],
14501 splt->contents + 0);
14502 put_arm_insn (htab, output_bfd, plt0_entry[1],
14503 splt->contents + 4);
14504 put_arm_insn (htab, output_bfd, plt0_entry[2],
14505 splt->contents + 8);
14506 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
14508 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
14509 rel.r_offset = plt_address + 12;
14510 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14512 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
14513 htab->srelplt2->contents);
14515 else if (htab->nacl_p)
14516 arm_nacl_put_plt0 (htab, output_bfd, splt,
14517 got_address + 8 - (plt_address + 16));
14518 else if (using_thumb_only (htab))
14520 got_displacement = got_address - (plt_address + 12);
14522 plt0_entry = elf32_thumb2_plt0_entry;
14523 put_arm_insn (htab, output_bfd, plt0_entry[0],
14524 splt->contents + 0);
14525 put_arm_insn (htab, output_bfd, plt0_entry[1],
14526 splt->contents + 4);
14527 put_arm_insn (htab, output_bfd, plt0_entry[2],
14528 splt->contents + 8);
14530 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
14534 got_displacement = got_address - (plt_address + 16);
14536 plt0_entry = elf32_arm_plt0_entry;
14537 put_arm_insn (htab, output_bfd, plt0_entry[0],
14538 splt->contents + 0);
14539 put_arm_insn (htab, output_bfd, plt0_entry[1],
14540 splt->contents + 4);
14541 put_arm_insn (htab, output_bfd, plt0_entry[2],
14542 splt->contents + 8);
14543 put_arm_insn (htab, output_bfd, plt0_entry[3],
14544 splt->contents + 12);
14546 #ifdef FOUR_WORD_PLT
14547 /* The displacement value goes in the otherwise-unused
14548 last word of the second entry. */
14549 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
14551 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
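/* In this default (non-FOUR_WORD_PLT) layout the word just written at
   offset 16 is a literal consumed by the PC-relative load/add sequence
   of elf32_arm_plt0_entry above it. The ARM PC is read eight bytes
   ahead of the instruction using it, so the add at offset 8 observes
   plt_address + 16; adding the displacement computed above as
   got_address - (plt_address + 16) therefore leaves the GOT address in
   the link register for the final indirect load. */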
14556 /* UnixWare sets the entsize of .plt to 4, although that doesn't
14557 really seem like the right value. */
14558 if (splt->output_section->owner == output_bfd)
14559 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
14561 if (htab->dt_tlsdesc_plt)
14563 bfd_vma got_address
14564 = sgot->output_section->vma + sgot->output_offset;
14565 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
14566 + htab->root.sgot->output_offset);
14567 bfd_vma plt_address
14568 = splt->output_section->vma + splt->output_offset;
14570 arm_put_trampoline (htab, output_bfd,
14571 splt->contents + htab->dt_tlsdesc_plt,
14572 dl_tlsdesc_lazy_trampoline, 6);
14574 bfd_put_32 (output_bfd,
14575 gotplt_address + htab->dt_tlsdesc_got
14576 - (plt_address + htab->dt_tlsdesc_plt)
14577 - dl_tlsdesc_lazy_trampoline[6],
14578 splt->contents + htab->dt_tlsdesc_plt + 24);
14579 bfd_put_32 (output_bfd,
14580 got_address - (plt_address + htab->dt_tlsdesc_plt)
14581 - dl_tlsdesc_lazy_trampoline[7],
14582 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
14585 if (htab->tls_trampoline)
14587 arm_put_trampoline (htab, output_bfd,
14588 splt->contents + htab->tls_trampoline,
14589 tls_trampoline, 3);
14590 #ifdef FOUR_WORD_PLT
14591 bfd_put_32 (output_bfd, 0x00000000,
14592 splt->contents + htab->tls_trampoline + 12);
14596 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
14598 /* Correct the .rel(a).plt.unloaded relocations. They will have
14599 incorrect symbol indexes. */
14603 num_plts = ((htab->root.splt->size - htab->plt_header_size)
14604 / htab->plt_entry_size);
14605 p = htab->srelplt2->contents + RELOC_SIZE (htab);
14607 for (; num_plts; num_plts--)
14609 Elf_Internal_Rela rel;
14611 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14612 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14613 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14614 p += RELOC_SIZE (htab);
14616 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14617 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
14618 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14619 p += RELOC_SIZE (htab);
14624 if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
14625 /* NaCl uses a special first entry in .iplt too. */
14626 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
14628 /* Fill in the first three entries in the global offset table. */
14631 if (sgot->size > 0)
14634 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
14636 bfd_put_32 (output_bfd,
14637 sdyn->output_section->vma + sdyn->output_offset,
14639 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
14640 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
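/* The word at offset 0 is the traditional GOT[0] entry, set above to
   the address of the .dynamic section when one exists. GOT[1] and
   GOT[2] are cleared here; by convention the dynamic linker fills them
   in at run time with its link-map pointer and the address of the lazy
   resolver that the PLT header jumps through. */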
14643 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
14650 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
14652 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
14653 struct elf32_arm_link_hash_table *globals;
14655 i_ehdrp = elf_elfheader (abfd);
14657 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
14658 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
14660 _bfd_elf_post_process_headers (abfd, link_info);
14661 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
14665 globals = elf32_arm_hash_table (link_info);
14666 if (globals != NULL && globals->byteswap_code)
14667 i_ehdrp->e_flags |= EF_ARM_BE8;
14670 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
14671 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
14673 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
14675 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
14677 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
14681 static enum elf_reloc_type_class
14682 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
14683 const asection *rel_sec ATTRIBUTE_UNUSED,
14684 const Elf_Internal_Rela *rela)
14686 switch ((int) ELF32_R_TYPE (rela->r_info))
14688 case R_ARM_RELATIVE:
14689 return reloc_class_relative;
14690 case R_ARM_JUMP_SLOT:
14691 return reloc_class_plt;
14693 return reloc_class_copy;
14695 return reloc_class_normal;
14700 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
14702 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
14705 /* Return TRUE if this is an unwinding table entry. */
14708 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
14710 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
14711 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
14715 /* Set the type and flags for an ARM section. We do this by
14716 the section name, which is a hack, but ought to work. */
14719 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
14723 name = bfd_get_section_name (abfd, sec);
14725 if (is_arm_elf_unwind_section_name (abfd, name))
14727 hdr->sh_type = SHT_ARM_EXIDX;
14728 hdr->sh_flags |= SHF_LINK_ORDER;
14733 /* Handle an ARM specific section when reading an object file. This is
14734 called when bfd_section_from_shdr finds a section with an unknown type. */
14738 elf32_arm_section_from_shdr (bfd *abfd,
14739 Elf_Internal_Shdr * hdr,
14743 /* There ought to be a place to keep ELF backend specific flags, but
14744 at the moment there isn't one. We just keep track of the
14745 sections by their name, instead. Fortunately, the ABI gives
14746 names for all the ARM specific sections, so we will probably get
14748 switch (hdr->sh_type)
14750 case SHT_ARM_EXIDX:
14751 case SHT_ARM_PREEMPTMAP:
14752 case SHT_ARM_ATTRIBUTES:
14759 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
14765 static _arm_elf_section_data *
14766 get_arm_elf_section_data (asection * sec)
14768 if (sec && sec->owner && is_arm_elf (sec->owner))
14769 return elf32_arm_section_data (sec);
14777 struct bfd_link_info *info;
14780 int (*func) (void *, const char *, Elf_Internal_Sym *,
14781 asection *, struct elf_link_hash_entry *);
14782 } output_arch_syminfo;
14784 enum map_symbol_type
14792 /* Output a single mapping symbol. */
14795 elf32_arm_output_map_sym (output_arch_syminfo *osi,
14796 enum map_symbol_type type,
14799 static const char *names[3] = {"$a", "$t", "$d"};
14800 Elf_Internal_Sym sym;
14802 sym.st_value = osi->sec->output_section->vma
14803 + osi->sec->output_offset
14807 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
14808 sym.st_shndx = osi->sec_shndx;
14809 sym.st_target_internal = 0;
14810 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
14811 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
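/* The names used here follow the AAELF mapping-symbol convention:
   "$a" marks the start of a run of ARM instructions, "$t" a run of
   Thumb instructions and "$d" a run of literal data. Disassemblers and
   the linker's own BE8 byte-swapping rely on these symbols to tell
   code from data within a section. */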
14814 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
14815 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
14818 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
14819 bfd_boolean is_iplt_entry_p,
14820 union gotplt_union *root_plt,
14821 struct arm_plt_info *arm_plt)
14823 struct elf32_arm_link_hash_table *htab;
14824 bfd_vma addr, plt_header_size;
14826 if (root_plt->offset == (bfd_vma) -1)
14829 htab = elf32_arm_hash_table (osi->info);
14833 if (is_iplt_entry_p)
14835 osi->sec = htab->root.iplt;
14836 plt_header_size = 0;
14840 osi->sec = htab->root.splt;
14841 plt_header_size = htab->plt_header_size;
14843 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
14844 (osi->info->output_bfd, osi->sec->output_section));
14846 addr = root_plt->offset & -2;
14847 if (htab->symbian_p)
14849 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14851 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
14854 else if (htab->vxworks_p)
14856 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14858 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
14860 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
14862 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
14865 else if (htab->nacl_p)
14867 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14870 else if (using_thumb_only (htab))
14872 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
14877 bfd_boolean thumb_stub_p;
14879 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
14882 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
14885 #ifdef FOUR_WORD_PLT
14886 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14888 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
14891 /* A three-word PLT with no Thumb thunk contains only ARM code,
14892 so we only need to output a mapping symbol for the first PLT entry
14893 and for entries with Thumb thunks. */
14894 if (thumb_stub_p || addr == plt_header_size)
14896 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14905 /* Output mapping symbols for PLT entries associated with H. */
14908 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
14910 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
14911 struct elf32_arm_link_hash_entry *eh;
14913 if (h->root.type == bfd_link_hash_indirect)
14916 if (h->root.type == bfd_link_hash_warning)
14917 /* When warning symbols are created, they **replace** the "real"
14918 entry in the hash table, thus we never get to see the real
14919 symbol in a hash traversal. So look at it now. */
14920 h = (struct elf_link_hash_entry *) h->root.u.i.link;
14922 eh = (struct elf32_arm_link_hash_entry *) h;
14923 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
14924 &h->plt, &eh->plt);
14927 /* Output a single local symbol for a generated stub. */
14930 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
14931 bfd_vma offset, bfd_vma size)
14933 Elf_Internal_Sym sym;
14935 sym.st_value = osi->sec->output_section->vma
14936 + osi->sec->output_offset
14938 sym.st_size = size;
14940 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14941 sym.st_shndx = osi->sec_shndx;
14942 sym.st_target_internal = 0;
14943 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
14947 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
14950 struct elf32_arm_stub_hash_entry *stub_entry;
14951 asection *stub_sec;
14954 output_arch_syminfo *osi;
14955 const insn_sequence *template_sequence;
14956 enum stub_insn_type prev_type;
14959 enum map_symbol_type sym_type;
14961 /* Massage our args to the form they really have. */
14962 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
14963 osi = (output_arch_syminfo *) in_arg;
14965 stub_sec = stub_entry->stub_sec;
14967 /* Ensure this stub is attached to the current section being processed. */
14969 if (stub_sec != osi->sec)
14972 addr = (bfd_vma) stub_entry->stub_offset;
14973 stub_name = stub_entry->output_name;
14975 template_sequence = stub_entry->stub_template;
14976 switch (template_sequence[0].type)
14979 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
14984 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
14985 stub_entry->stub_size))
14993 prev_type = DATA_TYPE;
14995 for (i = 0; i < stub_entry->stub_template_size; i++)
14997 switch (template_sequence[i].type)
15000 sym_type = ARM_MAP_ARM;
15005 sym_type = ARM_MAP_THUMB;
15009 sym_type = ARM_MAP_DATA;
15017 if (template_sequence[i].type != prev_type)
15019 prev_type = template_sequence[i].type;
15020 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
15024 switch (template_sequence[i].type)
15048 /* Output mapping symbols for linker generated sections,
15049 and for those data-only sections that do not have a
15053 elf32_arm_output_arch_local_syms (bfd *output_bfd,
15054 struct bfd_link_info *info,
15056 int (*func) (void *, const char *,
15057 Elf_Internal_Sym *,
15059 struct elf_link_hash_entry *))
15061 output_arch_syminfo osi;
15062 struct elf32_arm_link_hash_table *htab;
15064 bfd_size_type size;
15067 htab = elf32_arm_hash_table (info);
15071 check_use_blx (htab);
15073 osi.flaginfo = flaginfo;
15077 /* Add a $d mapping symbol to data-only sections that
15078 don't have any mapping symbol. This may result in (harmless) redundant
15079 mapping symbols. */
15080 for (input_bfd = info->input_bfds;
15082 input_bfd = input_bfd->link.next)
15084 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
15085 for (osi.sec = input_bfd->sections;
15087 osi.sec = osi.sec->next)
15089 if (osi.sec->output_section != NULL
15090 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
15092 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
15093 == SEC_HAS_CONTENTS
15094 && get_arm_elf_section_data (osi.sec) != NULL
15095 && get_arm_elf_section_data (osi.sec)->mapcount == 0
15096 && osi.sec->size > 0
15097 && (osi.sec->flags & SEC_EXCLUDE) == 0)
15099 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15100 (output_bfd, osi.sec->output_section);
15101 if (osi.sec_shndx != (int)SHN_BAD)
15102 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
15107 /* ARM->Thumb glue. */
15108 if (htab->arm_glue_size > 0)
15110 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
15111 ARM2THUMB_GLUE_SECTION_NAME);
15113 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15114 (output_bfd, osi.sec->output_section);
15115 if (info->shared || htab->root.is_relocatable_executable
15116 || htab->pic_veneer)
15117 size = ARM2THUMB_PIC_GLUE_SIZE;
15118 else if (htab->use_blx)
15119 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
15121 size = ARM2THUMB_STATIC_GLUE_SIZE;
15123 for (offset = 0; offset < htab->arm_glue_size; offset += size)
15125 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
15126 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
15130 /* Thumb->ARM glue. */
15131 if (htab->thumb_glue_size > 0)
15133 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
15134 THUMB2ARM_GLUE_SECTION_NAME);
15136 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15137 (output_bfd, osi.sec->output_section);
15138 size = THUMB2ARM_GLUE_SIZE;
15140 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
15142 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
15143 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
15147 /* ARMv4 BX veneers. */
15148 if (htab->bx_glue_size > 0)
15150 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
15151 ARM_BX_GLUE_SECTION_NAME);
15153 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15154 (output_bfd, osi.sec->output_section);
15156 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
15159 /* Long call stubs. */
15160 if (htab->stub_bfd && htab->stub_bfd->sections)
15162 asection* stub_sec;
15164 for (stub_sec = htab->stub_bfd->sections;
15166 stub_sec = stub_sec->next)
15168 /* Ignore non-stub sections. */
15169 if (!strstr (stub_sec->name, STUB_SUFFIX))
15172 osi.sec = stub_sec;
15174 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15175 (output_bfd, osi.sec->output_section);
15177 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
15181 /* Finally, output mapping symbols for the PLT. */
15182 if (htab->root.splt && htab->root.splt->size > 0)
15184 osi.sec = htab->root.splt;
15185 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
15186 (output_bfd, osi.sec->output_section));
15188 /* Output mapping symbols for the plt header. SymbianOS does not have a plt header. */
15190 if (htab->vxworks_p)
15192 /* VxWorks shared libraries have no PLT header. */
15195 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
15197 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
15201 else if (htab->nacl_p)
15203 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
15206 else if (using_thumb_only (htab))
15208 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
15210 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
15212 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
15215 else if (!htab->symbian_p)
15217 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
15219 #ifndef FOUR_WORD_PLT
15220 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
15225 if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
15227 /* NaCl uses a special first entry in .iplt too. */
15228 osi.sec = htab->root.iplt;
15229 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
15230 (output_bfd, osi.sec->output_section));
15231 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
15234 if ((htab->root.splt && htab->root.splt->size > 0)
15235 || (htab->root.iplt && htab->root.iplt->size > 0))
15237 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
15238 for (input_bfd = info->input_bfds;
15240 input_bfd = input_bfd->link.next)
15242 struct arm_local_iplt_info **local_iplt;
15243 unsigned int i, num_syms;
15245 local_iplt = elf32_arm_local_iplt (input_bfd);
15246 if (local_iplt != NULL)
15248 num_syms = elf_symtab_hdr (input_bfd).sh_info;
15249 for (i = 0; i < num_syms; i++)
15250 if (local_iplt[i] != NULL
15251 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
15252 &local_iplt[i]->root,
15253 &local_iplt[i]->arm))
15258 if (htab->dt_tlsdesc_plt != 0)
15260 /* Mapping symbols for the lazy tls trampoline. */
15261 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
15264 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15265 htab->dt_tlsdesc_plt + 24))
15268 if (htab->tls_trampoline != 0)
15270 /* Mapping symbols for the tls trampoline. */
15271 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
15273 #ifdef FOUR_WORD_PLT
15274 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15275 htab->tls_trampoline + 12))
15283 /* Allocate target specific section data. */
15286 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
15288 if (!sec->used_by_bfd)
15290 _arm_elf_section_data *sdata;
15291 bfd_size_type amt = sizeof (*sdata);
15293 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
15296 sec->used_by_bfd = sdata;
15299 return _bfd_elf_new_section_hook (abfd, sec);
15303 /* Used to order a list of mapping symbols by address. */
15306 elf32_arm_compare_mapping (const void * a, const void * b)
15308 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
15309 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
15311 if (amap->vma > bmap->vma)
15313 else if (amap->vma < bmap->vma)
15315 else if (amap->type > bmap->type)
15316 /* Ensure results do not depend on the host qsort for objects with
15317 multiple mapping symbols at the same address by sorting on type after vma. */
15320 else if (amap->type < bmap->type)
15326 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
15328 static unsigned long
15329 offset_prel31 (unsigned long addr, bfd_vma offset)
15331 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
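/* For instance, a prel31 field holding -8 (0x7ffffff8) adjusted by an
   OFFSET of 0x10 becomes +8 (0x00000008): the low 31 bits wrap as a
   signed 31-bit quantity while bit 31, which the exception-table
   format interprets separately, is left untouched. */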
15334 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
15338 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
15340 unsigned long first_word = bfd_get_32 (output_bfd, from);
15341 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
15343 /* High bit of first word is supposed to be zero. */
15344 if ((first_word & 0x80000000ul) == 0)
15345 first_word = offset_prel31 (first_word, offset);
15347 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
15348 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
15349 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
15350 second_word = offset_prel31 (second_word, offset);
15352 bfd_put_32 (output_bfd, first_word, to);
15353 bfd_put_32 (output_bfd, second_word, to + 4);
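/* Illustrative sketch (not part of the original source): the same rebasing
   that copy_exidx_entry applies, shown on two already-extracted words.  The
   first word is a prel31 offset to the function; the second is either the
   literal EXIDX_CANTUNWIND marker (0x1), inline unwind data (top bit set),
   or a prel31 offset to an .ARM.extab entry.  Only prel31 words move.  */
static void
adjust_exidx_words_example (unsigned long *first, unsigned long *second,
			    unsigned long offset)
{
  if ((*first & 0x80000000ul) == 0)
    *first = offset_prel31 (*first, offset);
  if (*second != 0x1 && (*second & 0x80000000ul) == 0)
    *second = offset_prel31 (*second, offset);
}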
15356 /* Data for make_branch_to_a8_stub(). */
15358 struct a8_branch_to_stub_data
15360 asection *writing_section;
15361 bfd_byte *contents;
15365 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
15366 places for a particular section. */
15369 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
15372 struct elf32_arm_stub_hash_entry *stub_entry;
15373 struct a8_branch_to_stub_data *data;
15374 bfd_byte *contents;
15375 unsigned long branch_insn;
15376 bfd_vma veneered_insn_loc, veneer_entry_loc;
15377 bfd_signed_vma branch_offset;
15379 unsigned int target;
15381 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15382 data = (struct a8_branch_to_stub_data *) in_arg;
15384 if (stub_entry->target_section != data->writing_section
15385 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
15388 contents = data->contents;
15390 veneered_insn_loc = stub_entry->target_section->output_section->vma
15391 + stub_entry->target_section->output_offset
15392 + stub_entry->target_value;
15394 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
15395 + stub_entry->stub_sec->output_offset
15396 + stub_entry->stub_offset;
15398 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
15399 veneered_insn_loc &= ~3u;
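  /* The branch offset below is relative to the Thumb PC, which reads as the
     address of the 32-bit branch plus 4 (hence the "- 4"); for BLX the
     offset is formed from the word-aligned PC, which the masking above
     provides.  */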
15401 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
15403 abfd = stub_entry->target_section->owner;
15404 target = stub_entry->target_value;
15406 /* We attempt to avoid this condition by setting stubs_always_after_branch
15407 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
15408 This check is just to be on the safe side... */
15409 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
15411 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
15412 "allocated in unsafe location"), abfd);
15416 switch (stub_entry->stub_type)
15418 case arm_stub_a8_veneer_b:
15419 case arm_stub_a8_veneer_b_cond:
15420 branch_insn = 0xf0009000;
15423 case arm_stub_a8_veneer_blx:
15424 branch_insn = 0xf000e800;
15427 case arm_stub_a8_veneer_bl:
15429 unsigned int i1, j1, i2, j2, s;
15431 branch_insn = 0xf000d000;
15434 if (branch_offset < -16777216 || branch_offset > 16777214)
15436 /* There's not much we can do apart from complain if this happens. */
15438 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
15439 "of range (input file too large)"), abfd);
15443 /* i1 = not(j1 eor s), so:
15444 not i1 = j1 eor s, and hence
15445 j1 = (not i1) eor s. */
15447 branch_insn |= (branch_offset >> 1) & 0x7ff;
15448 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
15449 i2 = (branch_offset >> 22) & 1;
15450 i1 = (branch_offset >> 23) & 1;
15451 s = (branch_offset >> 24) & 1;
15452 j1 = (! i1) ^ s;
15453 j2 = (! i2) ^ s;
15454 branch_insn |= j2 << 11;
15455 branch_insn |= j1 << 13;
15456 branch_insn |= s << 26;
15465 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
15466 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
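/* Illustrative sketch (not part of the original source): the Thumb-2 BL
   immediate packing used for the arm_stub_a8_veneer_bl case above, with the
   range check omitted.  J1 and J2 are derived from I1, I2 and S as the
   comment above notes: j = (not i) eor s.  */
static unsigned long
encode_thumb2_bl_example (long branch_offset)
{
  unsigned long insn = 0xf000d000;
  unsigned int i1, i2, j1, j2, s;

  insn |= (branch_offset >> 1) & 0x7ff;		/* imm11.  */
  insn |= ((branch_offset >> 12) & 0x3ff) << 16;	/* imm10.  */
  i2 = (branch_offset >> 22) & 1;
  i1 = (branch_offset >> 23) & 1;
  s = (branch_offset >> 24) & 1;
  j1 = (! i1) ^ s;
  j2 = (! i2) ^ s;
  insn |= j2 << 11;
  insn |= j1 << 13;
  insn |= s << 26;
  return insn;
}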
15471 /* Do code byteswapping. Return FALSE afterwards so that the section is
15472 written out as normal. */
15475 elf32_arm_write_section (bfd *output_bfd,
15476 struct bfd_link_info *link_info,
15478 bfd_byte *contents)
15480 unsigned int mapcount, errcount;
15481 _arm_elf_section_data *arm_data;
15482 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
15483 elf32_arm_section_map *map;
15484 elf32_vfp11_erratum_list *errnode;
15487 bfd_vma offset = sec->output_section->vma + sec->output_offset;
15491 if (globals == NULL)
15494 /* If this section has not been allocated an _arm_elf_section_data
15495 structure then we cannot record anything. */
15496 arm_data = get_arm_elf_section_data (sec);
15497 if (arm_data == NULL)
15500 mapcount = arm_data->mapcount;
15501 map = arm_data->map;
15502 errcount = arm_data->erratumcount;
15506 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
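      /* XORing the byte index with 3 stores each patched 32-bit instruction
	 in the output's byte order: least-significant byte first on
	 little-endian output, most-significant byte first on big-endian
	 output.  */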
15508 for (errnode = arm_data->erratumlist; errnode != 0;
15509 errnode = errnode->next)
15511 bfd_vma target = errnode->vma - offset;
15513 switch (errnode->type)
15515 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
15517 bfd_vma branch_to_veneer;
15518 /* Original condition code of instruction, plus bit mask for
15519 ARM B instruction. */
15520 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
15523 /* The instruction is before the label. */
15526 /* Above offset included in -4 below. */
15527 branch_to_veneer = errnode->u.b.veneer->vma
15528 - errnode->vma - 4;
15530 if ((signed) branch_to_veneer < -(1 << 25)
15531 || (signed) branch_to_veneer >= (1 << 25))
15532 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15533 "range"), output_bfd);
15535 insn |= (branch_to_veneer >> 2) & 0xffffff;
15536 contents[endianflip ^ target] = insn & 0xff;
15537 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15538 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15539 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15543 case VFP11_ERRATUM_ARM_VENEER:
15545 bfd_vma branch_from_veneer;
15548 /* Take size of veneer into account. */
15549 branch_from_veneer = errnode->u.v.branch->vma
15550 - errnode->vma - 12;
15552 if ((signed) branch_from_veneer < -(1 << 25)
15553 || (signed) branch_from_veneer >= (1 << 25))
15554 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15555 "range"), output_bfd);
15557 /* Original instruction. */
15558 insn = errnode->u.v.branch->u.b.vfp_insn;
15559 contents[endianflip ^ target] = insn & 0xff;
15560 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15561 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15562 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15564 /* Branch back to insn after original insn. */
15565 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
15566 contents[endianflip ^ (target + 4)] = insn & 0xff;
15567 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
15568 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
15569 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
15579 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
15581 arm_unwind_table_edit *edit_node
15582 = arm_data->u.exidx.unwind_edit_list;
15583 /* Now, sec->size is the size of the section we will write. The original
15584 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
15585 markers) was sec->rawsize. (If we perform no edits, rawsize will be
15586 zero and we should use size instead.) */
15587 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
15588 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
15589 unsigned int in_index, out_index;
15590 bfd_vma add_to_offsets = 0;
15592 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
15596 unsigned int edit_index = edit_node->index;
15598 if (in_index < edit_index && in_index * 8 < input_size)
15600 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15601 contents + in_index * 8, add_to_offsets);
15605 else if (in_index == edit_index
15606 || (in_index * 8 >= input_size
15607 && edit_index == UINT_MAX))
15609 switch (edit_node->type)
15611 case DELETE_EXIDX_ENTRY:
15613 add_to_offsets += 8;
15616 case INSERT_EXIDX_CANTUNWIND_AT_END:
15618 asection *text_sec = edit_node->linked_section;
15619 bfd_vma text_offset = text_sec->output_section->vma
15620 + text_sec->output_offset
15622 bfd_vma exidx_offset = offset + out_index * 8;
15623 unsigned long prel31_offset;
15625 /* Note: this is meant to be equivalent to an
15626 R_ARM_PREL31 relocation. These synthetic
15627 EXIDX_CANTUNWIND markers are not relocated by the
15628 usual BFD method. */
15629 prel31_offset = (text_offset - exidx_offset)
15632 /* First address we can't unwind. */
15633 bfd_put_32 (output_bfd, prel31_offset,
15634 &edited_contents[out_index * 8]);
15636 /* Code for EXIDX_CANTUNWIND. */
15637 bfd_put_32 (output_bfd, 0x1,
15638 &edited_contents[out_index * 8 + 4]);
15641 add_to_offsets -= 8;
15646 edit_node = edit_node->next;
15651 /* No more edits, copy remaining entries verbatim. */
15652 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15653 contents + in_index * 8, add_to_offsets);
15659 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
15660 bfd_set_section_contents (output_bfd, sec->output_section,
15662 (file_ptr) sec->output_offset, sec->size);
15667 /* Fix code to point to Cortex-A8 erratum stubs. */
15668 if (globals->fix_cortex_a8)
15670 struct a8_branch_to_stub_data data;
15672 data.writing_section = sec;
15673 data.contents = contents;
15675 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
15682 if (globals->byteswap_code)
15684 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
15687 for (i = 0; i < mapcount; i++)
15689 if (i == mapcount - 1)
15692 end = map[i + 1].vma;
15694 switch (map[i].type)
15697 /* Byte swap code words. */
15698 while (ptr + 3 < end)
15700 tmp = contents[ptr];
15701 contents[ptr] = contents[ptr + 3];
15702 contents[ptr + 3] = tmp;
15703 tmp = contents[ptr + 1];
15704 contents[ptr + 1] = contents[ptr + 2];
15705 contents[ptr + 2] = tmp;
15711 /* Byte swap code halfwords. */
15712 while (ptr + 1 < end)
15714 tmp = contents[ptr];
15715 contents[ptr] = contents[ptr + 1];
15716 contents[ptr + 1] = tmp;
15722 /* Leave data alone. */
15730 arm_data->mapcount = -1;
15731 arm_data->mapsize = 0;
15732 arm_data->map = NULL;
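/* Illustrative sketch (not part of the original source): the swaps performed
   above, shown on a standalone buffer.  Between mapping symbols, ARM code is
   reversed as 32-bit words, Thumb code as 16-bit halfwords, and data is left
   untouched.  */
static void
byteswap_code_example (unsigned char *arm_word, unsigned char *thumb_halfword)
{
  unsigned char tmp;

  tmp = arm_word[0]; arm_word[0] = arm_word[3]; arm_word[3] = tmp;
  tmp = arm_word[1]; arm_word[1] = arm_word[2]; arm_word[2] = tmp;

  tmp = thumb_halfword[0];
  thumb_halfword[0] = thumb_halfword[1];
  thumb_halfword[1] = tmp;
}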
15737 /* Mangle thumb function symbols as we read them in. */
15740 elf32_arm_swap_symbol_in (bfd * abfd,
15743 Elf_Internal_Sym *dst)
15745 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
15748 /* New EABI objects mark thumb function symbols by setting the low bit of the address. */
15750 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
15751 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
15753 if (dst->st_value & 1)
15755 dst->st_value &= ~(bfd_vma) 1;
15756 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15759 dst->st_target_internal = ST_BRANCH_TO_ARM;
15761 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
15763 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
15764 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15766 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
15767 dst->st_target_internal = ST_BRANCH_LONG;
15769 dst->st_target_internal = ST_BRANCH_UNKNOWN;
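/* Illustrative sketch (not part of the original source): the EABI convention
   handled above.  Bit 0 of st_value flags a Thumb function; the usable
   address is the value with that bit cleared.  */
static unsigned long
thumb_symbol_value_example (unsigned long st_value, int *is_thumb)
{
  *is_thumb = (st_value & 1) != 0;
  return st_value & ~(unsigned long) 1;
}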
15775 /* Mangle thumb function symbols as we write them out. */
15778 elf32_arm_swap_symbol_out (bfd *abfd,
15779 const Elf_Internal_Sym *src,
15783 Elf_Internal_Sym newsym;
15785 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
15786 of the address set, as per the new EABI. We do this unconditionally
15787 because objcopy does not set the elf header flags until after
15788 it writes out the symbol table. */
15789 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
15792 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
15793 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
15794 if (newsym.st_shndx != SHN_UNDEF)
15796 /* Do this only for defined symbols. At link time, the static
15797 linker will simulate the work of the dynamic linker in resolving
15798 symbols and will carry over the thumbness of found symbols to
15799 the output symbol table. It's not clear how it happens, but
15800 the thumbness of undefined symbols can well be different at
15801 runtime, and writing '1' for them will be confusing for users
15802 and possibly for the dynamic linker itself. */
15804 newsym.st_value |= 1;
15809 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
15812 /* Add the PT_ARM_EXIDX program header. */
15815 elf32_arm_modify_segment_map (bfd *abfd,
15816 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15818 struct elf_segment_map *m;
15821 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15822 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15824 /* If there is already a PT_ARM_EXIDX header, then we do not
15825 want to add another one. This situation arises when running
15826 "strip"; the input binary already has the header. */
15827 m = elf_seg_map (abfd);
15828 while (m && m->p_type != PT_ARM_EXIDX)
15832 m = (struct elf_segment_map *)
15833 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
15836 m->p_type = PT_ARM_EXIDX;
15838 m->sections[0] = sec;
15840 m->next = elf_seg_map (abfd);
15841 elf_seg_map (abfd) = m;
15848 /* We may add a PT_ARM_EXIDX program header. */
15851 elf32_arm_additional_program_headers (bfd *abfd,
15852 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15856 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15857 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15863 /* Hook called by the linker routine which adds symbols from an object file. */
15867 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
15868 Elf_Internal_Sym *sym, const char **namep,
15869 flagword *flagsp, asection **secp, bfd_vma *valp)
15871 if ((abfd->flags & DYNAMIC) == 0
15872 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
15873 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
15874 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
15876 if (elf32_arm_hash_table (info) == NULL)
15879 if (elf32_arm_hash_table (info)->vxworks_p
15880 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
15881 flagsp, secp, valp))
15887 /* We use this to override swap_symbol_in and swap_symbol_out. */
15888 const struct elf_size_info elf32_arm_size_info =
15890 sizeof (Elf32_External_Ehdr),
15891 sizeof (Elf32_External_Phdr),
15892 sizeof (Elf32_External_Shdr),
15893 sizeof (Elf32_External_Rel),
15894 sizeof (Elf32_External_Rela),
15895 sizeof (Elf32_External_Sym),
15896 sizeof (Elf32_External_Dyn),
15897 sizeof (Elf_External_Note),
15901 ELFCLASS32, EV_CURRENT,
15902 bfd_elf32_write_out_phdrs,
15903 bfd_elf32_write_shdrs_and_ehdr,
15904 bfd_elf32_checksum_contents,
15905 bfd_elf32_write_relocs,
15906 elf32_arm_swap_symbol_in,
15907 elf32_arm_swap_symbol_out,
15908 bfd_elf32_slurp_reloc_table,
15909 bfd_elf32_slurp_symbol_table,
15910 bfd_elf32_swap_dyn_in,
15911 bfd_elf32_swap_dyn_out,
15912 bfd_elf32_swap_reloc_in,
15913 bfd_elf32_swap_reloc_out,
15914 bfd_elf32_swap_reloca_in,
15915 bfd_elf32_swap_reloca_out
15918 /* Return size of plt0 entry starting at ADDR
15919 or (bfd_vma) -1 if size cannot be determined. */
15922 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
15924 bfd_vma first_word;
15927 first_word = H_GET_32 (abfd, addr);
15929 if (first_word == elf32_arm_plt0_entry[0])
15930 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
15931 else if (first_word == elf32_thumb2_plt0_entry[0])
15932 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
15934 /* We don't yet handle this PLT format. */
15935 return (bfd_vma) -1;
15940 /* Return size of plt entry starting at offset OFFSET
15941 of plt section located at address START
15942 or (bfd_vma) -1 if size cannot be determined. */
15945 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
15947 bfd_vma first_insn;
15948 bfd_vma plt_size = 0;
15949 const bfd_byte *addr = start + offset;
15951 /* PLT entry size is fixed on Thumb-only platforms. */
15952 if (H_GET_32 (abfd, start) == elf32_thumb2_plt0_entry[0])
15953 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
15955 /* Respect Thumb stub if necessary. */
15956 if (H_GET_16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
15958 plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
15961 /* Strip immediate from first add. */
15962 first_insn = H_GET_32 (abfd, addr + plt_size) & 0xffffff00;
15964 #ifdef FOUR_WORD_PLT
15965 if (first_insn == elf32_arm_plt_entry[0])
15966 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
15968 if (first_insn == elf32_arm_plt_entry_long[0])
15969 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
15970 else if (first_insn == elf32_arm_plt_entry_short[0])
15971 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
15974 /* We don't yet handle this PLT format. */
15975 return (bfd_vma) -1;
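/* Illustrative sketch (not part of the original source): one way a caller
   could walk a PLT section with the two helpers above.  The synthetic-symbol
   code below instead advances once per .rel.plt relocation, but the size
   bookkeeping is the same.  */
static long
count_plt_entries_example (const bfd *abfd, const bfd_byte *plt_contents,
			   bfd_vma plt_total_size)
{
  long n = 0;
  bfd_vma offset = elf32_arm_plt0_size (abfd, plt_contents);

  if (offset == (bfd_vma) -1)
    return -1;
  while (offset < plt_total_size)
    {
      bfd_vma entry_size = elf32_arm_plt_size (abfd, plt_contents, offset);

      if (entry_size == (bfd_vma) -1)
	return -1;
      offset += entry_size;
      n++;
    }
  return n;
}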
15980 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
15983 elf32_arm_get_synthetic_symtab (bfd *abfd,
15984 long symcount ATTRIBUTE_UNUSED,
15985 asymbol **syms ATTRIBUTE_UNUSED,
15995 Elf_Internal_Shdr *hdr;
16003 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
16006 if (dynsymcount <= 0)
16009 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
16010 if (relplt == NULL)
16013 hdr = &elf_section_data (relplt)->this_hdr;
16014 if (hdr->sh_link != elf_dynsymtab (abfd)
16015 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
16018 plt = bfd_get_section_by_name (abfd, ".plt");
16022 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
16025 data = plt->contents;
16028 if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data) || data == NULL)
16030 bfd_cache_section_contents ((asection *) plt, data);
16033 count = relplt->size / hdr->sh_entsize;
16034 size = count * sizeof (asymbol);
16035 p = relplt->relocation;
16036 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
16038 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
16039 if (p->addend != 0)
16040 size += sizeof ("+0x") - 1 + 8;
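      /* i.e. room for "<name>+0xXXXXXXXX@plt" with up to eight hex digits of
	 addend; the terminating NUL is already counted by sizeof ("@plt").  */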
16043 s = *ret = (asymbol *) bfd_malloc (size);
16047 offset = elf32_arm_plt0_size (abfd, data);
16048 if (offset == (bfd_vma) -1)
16051 names = (char *) (s + count);
16052 p = relplt->relocation;
16054 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
16058 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
16059 if (plt_size == (bfd_vma) -1)
16062 *s = **p->sym_ptr_ptr;
16063 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
16064 we are defining a symbol, ensure one of them is set. */
16065 if ((s->flags & BSF_LOCAL) == 0)
16066 s->flags |= BSF_GLOBAL;
16067 s->flags |= BSF_SYNTHETIC;
16072 len = strlen ((*p->sym_ptr_ptr)->name);
16073 memcpy (names, (*p->sym_ptr_ptr)->name, len);
16075 if (p->addend != 0)
16079 memcpy (names, "+0x", sizeof ("+0x") - 1);
16080 names += sizeof ("+0x") - 1;
16081 bfd_sprintf_vma (abfd, buf, p->addend);
16082 for (a = buf; *a == '0'; ++a)
16085 memcpy (names, a, len);
16088 memcpy (names, "@plt", sizeof ("@plt"));
16089 names += sizeof ("@plt");
16091 offset += plt_size;
16097 #define ELF_ARCH bfd_arch_arm
16098 #define ELF_TARGET_ID ARM_ELF_DATA
16099 #define ELF_MACHINE_CODE EM_ARM
16100 #ifdef __QNXTARGET__
16101 #define ELF_MAXPAGESIZE 0x1000
16103 #define ELF_MAXPAGESIZE 0x8000
16105 #define ELF_MINPAGESIZE 0x1000
16106 #define ELF_COMMONPAGESIZE 0x1000
16108 #define bfd_elf32_mkobject elf32_arm_mkobject
16110 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
16111 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
16112 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
16113 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
16114 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
16115 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
16116 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
16117 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
16118 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
16119 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
16120 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
16121 #define bfd_elf32_bfd_final_link elf32_arm_final_link
16122 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
16124 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
16125 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
16126 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
16127 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
16128 #define elf_backend_check_relocs elf32_arm_check_relocs
16129 #define elf_backend_relocate_section elf32_arm_relocate_section
16130 #define elf_backend_write_section elf32_arm_write_section
16131 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
16132 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
16133 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
16134 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
16135 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
16136 #define elf_backend_always_size_sections elf32_arm_always_size_sections
16137 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
16138 #define elf_backend_post_process_headers elf32_arm_post_process_headers
16139 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
16140 #define elf_backend_object_p elf32_arm_object_p
16141 #define elf_backend_fake_sections elf32_arm_fake_sections
16142 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
16143 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16144 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
16145 #define elf_backend_size_info elf32_arm_size_info
16146 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
16147 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
16148 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
16149 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
16150 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
16152 #define elf_backend_can_refcount 1
16153 #define elf_backend_can_gc_sections 1
16154 #define elf_backend_plt_readonly 1
16155 #define elf_backend_want_got_plt 1
16156 #define elf_backend_want_plt_sym 0
16157 #define elf_backend_may_use_rel_p 1
16158 #define elf_backend_may_use_rela_p 0
16159 #define elf_backend_default_use_rela_p 0
16161 #define elf_backend_got_header_size 12
16163 #undef elf_backend_obj_attrs_vendor
16164 #define elf_backend_obj_attrs_vendor "aeabi"
16165 #undef elf_backend_obj_attrs_section
16166 #define elf_backend_obj_attrs_section ".ARM.attributes"
16167 #undef elf_backend_obj_attrs_arg_type
16168 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
16169 #undef elf_backend_obj_attrs_section_type
16170 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
16171 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
16172 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
16174 #include "elf32-target.h"
16176 /* Native Client targets. */
16178 #undef TARGET_LITTLE_SYM
16179 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
16180 #undef TARGET_LITTLE_NAME
16181 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
16182 #undef TARGET_BIG_SYM
16183 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
16184 #undef TARGET_BIG_NAME
16185 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
16187 /* Like elf32_arm_link_hash_table_create -- but overrides
16188 appropriately for NaCl. */
16190 static struct bfd_link_hash_table *
16191 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
16193 struct bfd_link_hash_table *ret;
16195 ret = elf32_arm_link_hash_table_create (abfd);
16198 struct elf32_arm_link_hash_table *htab
16199 = (struct elf32_arm_link_hash_table *) ret;
16203 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
16204 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
16209 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
16210 really need to use elf32_arm_modify_segment_map. But we do it
16211 anyway just to reduce gratuitous differences with the stock ARM backend. */
16214 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
16216 return (elf32_arm_modify_segment_map (abfd, info)
16217 && nacl_modify_segment_map (abfd, info));
16221 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
16223 elf32_arm_final_write_processing (abfd, linker);
16224 nacl_final_write_processing (abfd, linker);
16228 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
16229 const arelent *rel ATTRIBUTE_UNUSED)
16232 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
16233 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
16237 #define elf32_bed elf32_arm_nacl_bed
16238 #undef bfd_elf32_bfd_link_hash_table_create
16239 #define bfd_elf32_bfd_link_hash_table_create \
16240 elf32_arm_nacl_link_hash_table_create
16241 #undef elf_backend_plt_alignment
16242 #define elf_backend_plt_alignment 4
16243 #undef elf_backend_modify_segment_map
16244 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
16245 #undef elf_backend_modify_program_headers
16246 #define elf_backend_modify_program_headers nacl_modify_program_headers
16247 #undef elf_backend_final_write_processing
16248 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
16249 #undef bfd_elf32_get_synthetic_symtab
16250 #undef elf_backend_plt_sym_val
16251 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
16253 #undef ELF_MAXPAGESIZE
16254 #define ELF_MAXPAGESIZE 0x10000
16255 #undef ELF_MINPAGESIZE
16256 #undef ELF_COMMONPAGESIZE
16259 #include "elf32-target.h"
16261 /* Reset to defaults. */
16262 #undef elf_backend_plt_alignment
16263 #undef elf_backend_modify_segment_map
16264 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
16265 #undef elf_backend_modify_program_headers
16266 #undef elf_backend_final_write_processing
16267 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16268 #undef ELF_MINPAGESIZE
16269 #define ELF_MINPAGESIZE 0x1000
16270 #undef ELF_COMMONPAGESIZE
16271 #define ELF_COMMONPAGESIZE 0x1000
16274 /* VxWorks Targets. */
16276 #undef TARGET_LITTLE_SYM
16277 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
16278 #undef TARGET_LITTLE_NAME
16279 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
16280 #undef TARGET_BIG_SYM
16281 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
16282 #undef TARGET_BIG_NAME
16283 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
16285 /* Like elf32_arm_link_hash_table_create -- but overrides
16286 appropriately for VxWorks. */
16288 static struct bfd_link_hash_table *
16289 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
16291 struct bfd_link_hash_table *ret;
16293 ret = elf32_arm_link_hash_table_create (abfd);
16296 struct elf32_arm_link_hash_table *htab
16297 = (struct elf32_arm_link_hash_table *) ret;
16299 htab->vxworks_p = 1;
16305 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
16307 elf32_arm_final_write_processing (abfd, linker);
16308 elf_vxworks_final_write_processing (abfd, linker);
16312 #define elf32_bed elf32_arm_vxworks_bed
16314 #undef bfd_elf32_bfd_link_hash_table_create
16315 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
16316 #undef elf_backend_final_write_processing
16317 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
16318 #undef elf_backend_emit_relocs
16319 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
16321 #undef elf_backend_may_use_rel_p
16322 #define elf_backend_may_use_rel_p 0
16323 #undef elf_backend_may_use_rela_p
16324 #define elf_backend_may_use_rela_p 1
16325 #undef elf_backend_default_use_rela_p
16326 #define elf_backend_default_use_rela_p 1
16327 #undef elf_backend_want_plt_sym
16328 #define elf_backend_want_plt_sym 1
16329 #undef ELF_MAXPAGESIZE
16330 #define ELF_MAXPAGESIZE 0x1000
16332 #include "elf32-target.h"
16335 /* Merge backend specific data from an object file to the output
16336 object file when linking. */
16339 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
16341 flagword out_flags;
16343 bfd_boolean flags_compatible = TRUE;
16346 /* Check if we have the same endianness. */
16347 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
16350 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
16353 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
16356 /* The input BFD must have had its flags initialised. */
16357 /* The following seems bogus to me -- The flags are initialized in
16358 the assembler but I don't think an elf_flags_init field is
16359 written into the object. */
16360 /* BFD_ASSERT (elf_flags_init (ibfd)); */
16362 in_flags = elf_elfheader (ibfd)->e_flags;
16363 out_flags = elf_elfheader (obfd)->e_flags;
16365 /* In theory there is no reason why we couldn't handle this. However
16366 in practice it isn't even close to working and there is no real
16367 reason to want it. */
16368 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
16369 && !(ibfd->flags & DYNAMIC)
16370 && (in_flags & EF_ARM_BE8))
16372 _bfd_error_handler (_("error: %B is already in final BE8 format"),
16377 if (!elf_flags_init (obfd))
16379 /* If the input is the default architecture and had the default
16380 flags then do not bother setting the flags for the output
16381 architecture, instead allow future merges to do this. If no
16382 future merges ever set these flags then they will retain their
16383 uninitialised values which, surprise surprise, correspond
16384 to the default values. */
16385 if (bfd_get_arch_info (ibfd)->the_default
16386 && elf_elfheader (ibfd)->e_flags == 0)
16389 elf_flags_init (obfd) = TRUE;
16390 elf_elfheader (obfd)->e_flags = in_flags;
16392 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
16393 && bfd_get_arch_info (obfd)->the_default)
16394 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
16399 /* Determine what should happen if the input ARM architecture
16400 does not match the output ARM architecture. */
16401 if (! bfd_arm_merge_machines (ibfd, obfd))
16404 /* Identical flags must be compatible. */
16405 if (in_flags == out_flags)
16408 /* Check to see if the input BFD actually contains any sections. If
16409 not, its flags may not have been initialised either, but it
16410 cannot actually cause any incompatibility. Do not short-circuit
16411 dynamic objects; their section list may be emptied by
16412 elf_link_add_object_symbols.
16414 Also check to see if there are no code sections in the input.
16415 In this case there is no need to check for code specific flags.
16416 XXX - do we need to worry about floating-point format compatibility
16417 in data sections? */
16418 if (!(ibfd->flags & DYNAMIC))
16420 bfd_boolean null_input_bfd = TRUE;
16421 bfd_boolean only_data_sections = TRUE;
16423 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
16425 /* Ignore synthetic glue sections. */
16426 if (strcmp (sec->name, ".glue_7")
16427 && strcmp (sec->name, ".glue_7t"))
16429 if ((bfd_get_section_flags (ibfd, sec)
16430 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
16431 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
16432 only_data_sections = FALSE;
16434 null_input_bfd = FALSE;
16439 if (null_input_bfd || only_data_sections)
16443 /* Complain about various flag mismatches. */
16444 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
16445 EF_ARM_EABI_VERSION (out_flags)))
16448 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
16450 (in_flags & EF_ARM_EABIMASK) >> 24,
16451 (out_flags & EF_ARM_EABIMASK) >> 24);
16455 /* Not sure what needs to be checked for EABI versions >= 1. */
16456 /* VxWorks libraries do not use these flags. */
16457 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
16458 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
16459 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
16461 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
16464 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
16466 in_flags & EF_ARM_APCS_26 ? 26 : 32,
16467 out_flags & EF_ARM_APCS_26 ? 26 : 32);
16468 flags_compatible = FALSE;
16471 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
16473 if (in_flags & EF_ARM_APCS_FLOAT)
16475 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
16479 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
16482 flags_compatible = FALSE;
16485 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
16487 if (in_flags & EF_ARM_VFP_FLOAT)
16489 (_("error: %B uses VFP instructions, whereas %B does not"),
16493 (_("error: %B uses FPA instructions, whereas %B does not"),
16496 flags_compatible = FALSE;
16499 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
16501 if (in_flags & EF_ARM_MAVERICK_FLOAT)
16503 (_("error: %B uses Maverick instructions, whereas %B does not"),
16507 (_("error: %B does not use Maverick instructions, whereas %B does"),
16510 flags_compatible = FALSE;
16513 #ifdef EF_ARM_SOFT_FLOAT
16514 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
16516 /* We can allow interworking between code that is VFP format
16517 layout, and uses either soft float or integer regs for
16518 passing floating point arguments and results. We already
16519 know that the APCS_FLOAT flags match; similarly for VFP flags. */
16521 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
16522 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
16524 if (in_flags & EF_ARM_SOFT_FLOAT)
16526 (_("error: %B uses software FP, whereas %B uses hardware FP"),
16530 (_("error: %B uses hardware FP, whereas %B uses software FP"),
16533 flags_compatible = FALSE;
16538 /* Interworking mismatch is only a warning. */
16539 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
16541 if (in_flags & EF_ARM_INTERWORK)
16544 (_("Warning: %B supports interworking, whereas %B does not"),
16550 (_("Warning: %B does not support interworking, whereas %B does"),
16556 return flags_compatible;
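/* Illustrative sketch (not part of the original source): extracting the EABI
   version number from e_flags as the diagnostic above does.  EF_ARM_EABIMASK
   covers the top byte of e_flags, so the version is simply that byte.  */
static unsigned int
eabi_version_example (flagword e_flags)
{
  return (e_flags & EF_ARM_EABIMASK) >> 24;
}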
16560 /* Symbian OS Targets. */
16562 #undef TARGET_LITTLE_SYM
16563 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
16564 #undef TARGET_LITTLE_NAME
16565 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
16566 #undef TARGET_BIG_SYM
16567 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
16568 #undef TARGET_BIG_NAME
16569 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
16571 /* Like elf32_arm_link_hash_table_create -- but overrides
16572 appropriately for Symbian OS. */
16574 static struct bfd_link_hash_table *
16575 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
16577 struct bfd_link_hash_table *ret;
16579 ret = elf32_arm_link_hash_table_create (abfd);
16582 struct elf32_arm_link_hash_table *htab
16583 = (struct elf32_arm_link_hash_table *)ret;
16584 /* There is no PLT header for Symbian OS. */
16585 htab->plt_header_size = 0;
16586 /* The PLT entries are each one instruction and one word. */
16587 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
16588 htab->symbian_p = 1;
16589 /* Symbian uses armv5t or above, so use_blx is always true. */
16591 htab->root.is_relocatable_executable = 1;
16596 static const struct bfd_elf_special_section
16597 elf32_arm_symbian_special_sections[] =
16599 /* In a BPABI executable, the dynamic linking sections do not go in
16600 the loadable read-only segment. The post-linker may wish to
16601 refer to these sections, but they are not part of the final program image. */
16603 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
16604 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
16605 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
16606 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
16607 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
16608 /* These sections do not need to be writable as the SymbianOS
16609 postlinker will arrange things so that no dynamic relocation is applied. */
16611 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
16612 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
16613 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
16614 { NULL, 0, 0, 0, 0 }
16618 elf32_arm_symbian_begin_write_processing (bfd *abfd,
16619 struct bfd_link_info *link_info)
16621 /* BPABI objects are never loaded directly by an OS kernel; they are
16622 processed by a postlinker first, into an OS-specific format. If
16623 the D_PAGED bit is set on the file, BFD will align segments on
16624 page boundaries, so that an OS can directly map the file. With
16625 BPABI objects, that just results in wasted space. In addition,
16626 because we clear the D_PAGED bit, map_sections_to_segments will
16627 recognize that the program headers should not be mapped into any
16628 loadable segment. */
16629 abfd->flags &= ~D_PAGED;
16630 elf32_arm_begin_write_processing (abfd, link_info);
16634 elf32_arm_symbian_modify_segment_map (bfd *abfd,
16635 struct bfd_link_info *info)
16637 struct elf_segment_map *m;
16640 /* BPABI shared libraries and executables should have a PT_DYNAMIC
16641 segment. However, because the .dynamic section is not marked
16642 with SEC_LOAD, the generic ELF code will not create such a segment. */
16644 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
16647 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
16648 if (m->p_type == PT_DYNAMIC)
16653 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
16654 m->next = elf_seg_map (abfd);
16655 elf_seg_map (abfd) = m;
16659 /* Also call the generic arm routine. */
16660 return elf32_arm_modify_segment_map (abfd, info);
16663 /* Return address for Ith PLT stub in section PLT, for relocation REL
16664 or (bfd_vma) -1 if it should not be included. */
16667 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
16668 const arelent *rel ATTRIBUTE_UNUSED)
16670 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
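  /* Given the one-instruction-plus-one-word entry set up in
     elf32_arm_symbian_link_hash_table_create, this is simply
     plt->vma + 8 * i.  */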
16675 #define elf32_bed elf32_arm_symbian_bed
16677 /* The dynamic sections are not allocated on SymbianOS; the postlinker
16678 will process them and then discard them. */
16679 #undef ELF_DYNAMIC_SEC_FLAGS
16680 #define ELF_DYNAMIC_SEC_FLAGS \
16681 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
16683 #undef elf_backend_emit_relocs
16685 #undef bfd_elf32_bfd_link_hash_table_create
16686 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
16687 #undef elf_backend_special_sections
16688 #define elf_backend_special_sections elf32_arm_symbian_special_sections
16689 #undef elf_backend_begin_write_processing
16690 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
16691 #undef elf_backend_final_write_processing
16692 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16694 #undef elf_backend_modify_segment_map
16695 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
16697 /* There is no .got section for BPABI objects, and hence no header. */
16698 #undef elf_backend_got_header_size
16699 #define elf_backend_got_header_size 0
16701 /* Similarly, there is no .got.plt section. */
16702 #undef elf_backend_want_got_plt
16703 #define elf_backend_want_got_plt 0
16705 #undef elf_backend_plt_sym_val
16706 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
16708 #undef elf_backend_may_use_rel_p
16709 #define elf_backend_may_use_rel_p 1
16710 #undef elf_backend_may_use_rela_p
16711 #define elf_backend_may_use_rela_p 0
16712 #undef elf_backend_default_use_rela_p
16713 #define elf_backend_default_use_rela_p 0
16714 #undef elf_backend_want_plt_sym
16715 #define elf_backend_want_plt_sym 0
16716 #undef ELF_MAXPAGESIZE
16717 #define ELF_MAXPAGESIZE 0x8000
16719 #include "elf32-target.h"