1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
26 #include "libiberty.h"
29 #include "elf-vxworks.h"
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
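/* Illustrative sketch (not part of the original source): how the REL/RELA
   helpers above are typically combined when emitting one dynamic
   relocation.  The hash table type and its use_rel field are the ones
   these macros are written against; the function name is hypothetical.
   Kept under #if 0 so it does not affect compilation.  */
#if 0
static void
example_emit_dyn_reloc (struct elf32_arm_link_hash_table *htab,
                        bfd *output_bfd, asection *sreloc,
                        Elf_Internal_Rela *rel)
{
  /* Each entry is RELOC_SIZE (htab) bytes; pick the swap-out routine
     that matches the REL vs. RELA choice recorded in the table.  */
  bfd_byte *loc = sreloc->contents
                  + sreloc->reloc_count++ * RELOC_SIZE (htab);
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
#endif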
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64 static struct elf_backend_data elf32_arm_vxworks_bed;
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
68 asection *sec,
69 bfd_byte *contents);
71 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
72 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
73 in that slot. */
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
82 FALSE, /* pc_relative */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
90 FALSE), /* pcrel_offset */
92 HOWTO (R_ARM_PC24, /* type */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
96 TRUE, /* pc_relative */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
111 FALSE, /* pc_relative */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
121 /* standard 32bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
126 TRUE, /* pc_relative */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
136 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
141 TRUE, /* pc_relative */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
156 FALSE, /* pc_relative */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
171 FALSE, /* pc_relative */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
181 HOWTO (R_ARM_THM_ABS5, /* type */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
185 FALSE, /* pc_relative */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
196 HOWTO (R_ARM_ABS8, /* type */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
200 FALSE, /* pc_relative */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
210 HOWTO (R_ARM_SBREL32, /* type */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
214 FALSE, /* pc_relative */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
224 HOWTO (R_ARM_THM_CALL, /* type */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
228 TRUE, /* pc_relative */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
238 HOWTO (R_ARM_THM_PC8, /* type */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
242 TRUE, /* pc_relative */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
252 HOWTO (R_ARM_BREL_ADJ, /* type */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
256 FALSE, /* pc_relative */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
266 HOWTO (R_ARM_SWI24, /* type */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
270 FALSE, /* pc_relative */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
280 HOWTO (R_ARM_THM_SWI8, /* type */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
284 FALSE, /* pc_relative */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
299 TRUE, /* pc_relative */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
314 TRUE, /* pc_relative */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
324 /* Dynamic TLS relocations. */
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
330 FALSE, /* pc_relative */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
344 FALSE, /* pc_relative */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
358 FALSE, /* pc_relative */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
368 /* Relocs used in ARM Linux */
370 HOWTO (R_ARM_COPY, /* type */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
374 FALSE, /* pc_relative */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
384 HOWTO (R_ARM_GLOB_DAT, /* type */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
388 FALSE, /* pc_relative */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
402 FALSE, /* pc_relative */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
412 HOWTO (R_ARM_RELATIVE, /* type */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
416 FALSE, /* pc_relative */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
426 HOWTO (R_ARM_GOTOFF32, /* type */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
430 FALSE, /* pc_relative */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
440 HOWTO (R_ARM_GOTPC, /* type */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
444 TRUE, /* pc_relative */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
454 HOWTO (R_ARM_GOT32, /* type */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
458 FALSE, /* pc_relative */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
468 HOWTO (R_ARM_PLT32, /* type */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
472 TRUE, /* pc_relative */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
482 HOWTO (R_ARM_CALL, /* type */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
486 TRUE, /* pc_relative */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
496 HOWTO (R_ARM_JUMP24, /* type */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
500 TRUE, /* pc_relative */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
510 HOWTO (R_ARM_THM_JUMP24, /* type */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
514 TRUE, /* pc_relative */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
524 HOWTO (R_ARM_BASE_ABS, /* type */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
528 FALSE, /* pc_relative */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
542 TRUE, /* pc_relative */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
556 TRUE, /* pc_relative */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
570 TRUE, /* pc_relative */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
584 FALSE, /* pc_relative */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
598 FALSE, /* pc_relative */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
612 FALSE, /* pc_relative */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
622 HOWTO (R_ARM_TARGET1, /* type */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
626 FALSE, /* pc_relative */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
636 HOWTO (R_ARM_ROSEGREL32, /* type */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
640 FALSE, /* pc_relative */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
650 HOWTO (R_ARM_V4BX, /* type */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
654 FALSE, /* pc_relative */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
664 HOWTO (R_ARM_TARGET2, /* type */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
668 FALSE, /* pc_relative */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
678 HOWTO (R_ARM_PREL31, /* type */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
682 TRUE, /* pc_relative */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
696 FALSE, /* pc_relative */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
706 HOWTO (R_ARM_MOVT_ABS, /* type */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
710 FALSE, /* pc_relative */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
724 TRUE, /* pc_relative */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
734 HOWTO (R_ARM_MOVT_PREL, /* type */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
738 TRUE, /* pc_relative */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
752 FALSE, /* pc_relative */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
766 FALSE, /* pc_relative */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
780 TRUE, /* pc_relative */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
794 TRUE, /* pc_relative */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
804 HOWTO (R_ARM_THM_JUMP19, /* type */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
808 TRUE, /* pc_relative */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
818 HOWTO (R_ARM_THM_JUMP6, /* type */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
822 TRUE, /* pc_relative */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
834 versa. */
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
839 TRUE, /* pc_relative */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
849 HOWTO (R_ARM_THM_PC12, /* type */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
853 TRUE, /* pc_relative */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
863 HOWTO (R_ARM_ABS32_NOI, /* type */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
867 FALSE, /* pc_relative */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
877 HOWTO (R_ARM_REL32_NOI, /* type */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
881 TRUE, /* pc_relative */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
891 /* Group relocations. */
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
897 TRUE, /* pc_relative */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
911 TRUE, /* pc_relative */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
925 TRUE, /* pc_relative */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
939 TRUE, /* pc_relative */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
953 TRUE, /* pc_relative */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
967 TRUE, /* pc_relative */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
981 TRUE, /* pc_relative */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
995 TRUE, /* pc_relative */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1009 TRUE, /* pc_relative */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1023 TRUE, /* pc_relative */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1037 TRUE, /* pc_relative */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1051 TRUE, /* pc_relative */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1065 TRUE, /* pc_relative */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1079 TRUE, /* pc_relative */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1093 TRUE, /* pc_relative */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1107 TRUE, /* pc_relative */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1121 TRUE, /* pc_relative */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1135 TRUE, /* pc_relative */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1149 TRUE, /* pc_relative */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1163 TRUE, /* pc_relative */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1177 TRUE, /* pc_relative */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1191 TRUE, /* pc_relative */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1205 TRUE, /* pc_relative */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1219 TRUE, /* pc_relative */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1233 TRUE, /* pc_relative */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1247 TRUE, /* pc_relative */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1261 TRUE, /* pc_relative */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1271 /* End of group relocations. */
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1277 FALSE, /* pc_relative */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1291 FALSE, /* pc_relative */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1305 FALSE, /* pc_relative */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1319 FALSE, /* pc_relative */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1333 FALSE, /* pc_relative */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1347 FALSE, /* pc_relative */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1357 EMPTY_HOWTO (90), /* Unallocated. */
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1366 FALSE, /* pc_relative */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1380 FALSE, /* pc_relative */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1394 TRUE, /* pc_relative */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1408 FALSE, /* pc_relative */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1422 FALSE, /* pc_relative */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1439 FALSE, /* pc_relative */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1447 FALSE), /* pcrel_offset */
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1454 FALSE, /* pc_relative */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1462 FALSE), /* pcrel_offset */
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1468 TRUE, /* pc_relative */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1482 TRUE, /* pc_relative */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1497 FALSE, /* pc_relative */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1511 FALSE, /* pc_relative */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1525 FALSE, /* pc_relative */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1539 FALSE, /* pc_relative */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1553 FALSE, /* pc_relative */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1567 FALSE, /* pc_relative */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1581 FALSE, /* pc_relative */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1595 FALSE, /* pc_relative */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1604 };
1606 /* 112-127 private relocations
1607 128 R_ARM_ME_TOO, obsolete
1608 129-255 unallocated in AAELF.
1610 249-255 extended, currently unused, relocations: */
1612 static reloc_howto_type elf32_arm_howto_table_2[4] =
1613 {
1614 HOWTO (R_ARM_RREL32, /* type */
1616 0, /* size (0 = byte, 1 = short, 2 = long) */
1618 FALSE, /* pc_relative */
1620 complain_overflow_dont,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_RREL32", /* name */
1623 FALSE, /* partial_inplace */
1626 FALSE), /* pcrel_offset */
1628 HOWTO (R_ARM_RABS32, /* type */
1630 0, /* size (0 = byte, 1 = short, 2 = long) */
1632 FALSE, /* pc_relative */
1634 complain_overflow_dont,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_RABS32", /* name */
1637 FALSE, /* partial_inplace */
1640 FALSE), /* pcrel_offset */
1642 HOWTO (R_ARM_RPC24, /* type */
1644 0, /* size (0 = byte, 1 = short, 2 = long) */
1646 FALSE, /* pc_relative */
1648 complain_overflow_dont,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_RPC24", /* name */
1651 FALSE, /* partial_inplace */
1654 FALSE), /* pcrel_offset */
1656 HOWTO (R_ARM_RBASE, /* type */
1658 0, /* size (0 = byte, 1 = short, 2 = long) */
1660 FALSE, /* pc_relative */
1662 complain_overflow_dont,/* complain_on_overflow */
1663 bfd_elf_generic_reloc, /* special_function */
1664 "R_ARM_RBASE", /* name */
1665 FALSE, /* partial_inplace */
1668 FALSE) /* pcrel_offset */
1669 };
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1673 {
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1688 unsigned int r_type;
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 }
1694 struct elf32_arm_reloc_map
1695 {
1696 bfd_reloc_code_real_type bfd_reloc_val;
1697 unsigned char elf_reloc_val;
1698 };
1700 /* All entries in this list must also be present in elf32_arm_howto_table. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1702 {
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1733 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 };
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1786 {
1787 unsigned int i;
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1793 return NULL;
1794 }
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1798 const char *r_name)
1799 {
1800 unsigned int i;
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1815 /* Support for core dump NOTE sections. */
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1823 switch (note->descsz)
1828 case 148: /* Linux/ARM 32-bit. */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1850 switch (note->descsz)
1855 case 124: /* Linux/ARM elf_prpsinfo. */
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1862 /* Note that for some reason, a spurious space is tacked
1863 onto the end of the args in some implementations (at least
1864 one, anyway), so strip it off if it exists. */
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1876 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1877 #define TARGET_LITTLE_NAME "elf32-littlearm"
1878 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1879 #define TARGET_BIG_NAME "elf32-bigarm"
1881 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1882 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1884 typedef unsigned long int insn32;
1885 typedef unsigned short int insn16;
1887 /* In lieu of proper flags, assume all EABIv4 or later objects are
1888 interworkable. */
1889 #define INTERWORK_FLAG(abfd) \
1890 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1891 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1892 || ((abfd)->flags & BFD_LINKER_CREATED))
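/* Illustrative sketch (assumption, not part of the original source):
   a check like INTERWORK_FLAG is consulted per input BFD before
   ARM<->Thumb glue is generated; the helper name is hypothetical.
   Kept under #if 0 so it does not affect compilation.  */
#if 0
static bfd_boolean
example_object_supports_interwork (bfd *ibfd)
{
  /* EABI v4+ objects are assumed interworking-safe; older objects
     must carry EF_ARM_INTERWORK explicitly.  */
  return INTERWORK_FLAG (ibfd) ? TRUE : FALSE;
}
#endif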
1894 /* The linker script knows the section names for placement.
1895 The entry_names are used to do simple name mangling on the stubs.
1896 Given a function name, and its type, the stub can be found. The
1897 name can be changed. The only requirement is that the %s be present; a usage sketch follows these definitions. */
1898 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1899 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1901 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1902 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1904 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1905 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1907 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1908 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1910 #define STUB_ENTRY_NAME "__%s_veneer"
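/* Illustrative sketch (assumption): expanding one of the entry-name
   templates above into the symbol name used for a particular glue
   stub.  The helper name is hypothetical.  Kept under #if 0 so it
   does not affect compilation.  */
#if 0
static char *
example_make_glue_name (const char *func_name)
{
  /* E.g. "__foo_from_thumb" for a Thumb->ARM transition to foo().
     The template is longer than the "%s" it loses, so this
     allocation is sufficient.  */
  char *name = bfd_malloc (strlen (THUMB2ARM_GLUE_ENTRY_NAME)
                           + strlen (func_name) + 1);
  if (name != NULL)
    sprintf (name, THUMB2ARM_GLUE_ENTRY_NAME, func_name);
  return name;
}
#endif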
1912 /* The name of the dynamic interpreter. This is put in the .interp
1913 section. */
1914 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1916 #ifdef FOUR_WORD_PLT
1918 /* The first entry in a procedure linkage table looks like
1919 this. It is set up so that any shared library function that is
1920 called before the relocation has been set up calls the dynamic
1921 linker first. */
1922 static const bfd_vma elf32_arm_plt0_entry [] =
1924 0xe52de004, /* str lr, [sp, #-4]! */
1925 0xe59fe010, /* ldr lr, [pc, #16] */
1926 0xe08fe00e, /* add lr, pc, lr */
1927 0xe5bef008, /* ldr pc, [lr, #8]! */
1930 /* Subsequent entries in a procedure linkage table look like
1931 this. */
1932 static const bfd_vma elf32_arm_plt_entry [] =
1934 0xe28fc600, /* add ip, pc, #NN */
1935 0xe28cca00, /* add ip, ip, #NN */
1936 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1937 0x00000000, /* unused */
1942 /* The first entry in a procedure linkage table looks like
1943 this. It is set up so that any shared library function that is
1944 called before the relocation has been set up calls the dynamic
1945 linker first. */
1946 static const bfd_vma elf32_arm_plt0_entry [] =
1948 0xe52de004, /* str lr, [sp, #-4]! */
1949 0xe59fe004, /* ldr lr, [pc, #4] */
1950 0xe08fe00e, /* add lr, pc, lr */
1951 0xe5bef008, /* ldr pc, [lr, #8]! */
1952 0x00000000, /* &GOT[0] - . */
1955 /* Subsequent entries in a procedure linkage table look like
1956 this. */
1957 static const bfd_vma elf32_arm_plt_entry [] =
1959 0xe28fc600, /* add ip, pc, #0xNN00000 */
1960 0xe28cca00, /* add ip, ip, #0xNN000 */
1961 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1966 /* The format of the first entry in the procedure linkage table
1967 for a VxWorks executable. */
1968 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1970 0xe52dc008, /* str ip,[sp,#-8]! */
1971 0xe59fc000, /* ldr ip,[pc] */
1972 0xe59cf008, /* ldr pc,[ip,#8] */
1973 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1976 /* The format of subsequent entries in a VxWorks executable. */
1977 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1979 0xe59fc000, /* ldr ip,[pc] */
1980 0xe59cf000, /* ldr pc,[ip] */
1981 0x00000000, /* .long @got */
1982 0xe59fc000, /* ldr ip,[pc] */
1983 0xea000000, /* b _PLT */
1984 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1987 /* The format of entries in a VxWorks shared library. */
1988 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1990 0xe59fc000, /* ldr ip,[pc] */
1991 0xe79cf009, /* ldr pc,[ip,r9] */
1992 0x00000000, /* .long @got */
1993 0xe59fc000, /* ldr ip,[pc] */
1994 0xe599f008, /* ldr pc,[r9,#8] */
1995 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1998 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1999 #define PLT_THUMB_STUB_SIZE 4
2000 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2006 /* The entries in a PLT when using a DLL-based target with multiple
2007 address spaces. */
2008 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2010 0xe51ff004, /* ldr pc, [pc, #-4] */
2011 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 };
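/* Illustrative sketch (assumption, not the original implementation):
   PLT templates such as the ones above are installed by copying each
   instruction word into the .plt section; the fields that depend on
   the GOT address are then patched separately.  The helper name is
   hypothetical.  Kept under #if 0 so it does not affect compilation.  */
#if 0
static void
example_install_plt_template (bfd *output_bfd, asection *splt,
                              bfd_vma offset, const bfd_vma *tmpl,
                              size_t count)
{
  size_t i;

  for (i = 0; i < count; i++)
    bfd_put_32 (output_bfd, tmpl[i], splt->contents + offset + i * 4);
}
#endif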
2014 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2015 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2016 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2017 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2018 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2019 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
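/* These limits follow from the branch encodings: an ARM B/BL holds a signed
   24-bit word offset (shifted left by 2) relative to PC+8, giving the
   (((1 << 23) - 1) << 2) + 8 forward reach above; Thumb-1 BL pairs reach
   roughly +/-4MB from PC+4 and Thumb-2 B.W/BL roughly +/-16MB.  */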
2029 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2030 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2031 is inserted in arm_build_one_stub(). */
2032 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2033 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2034 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2035 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2036 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2037 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2042 enum stub_insn_type type;
2043 unsigned int r_type;
2047 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2048 to reach the stub if necessary. */
2049 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2051 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2052 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2055 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not available. */
2057 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2059 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2060 ARM_INSN(0xe12fff1c), /* bx ip */
2061 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2064 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2065 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2067 THUMB16_INSN(0xb401), /* push {r0} */
2068 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2069 THUMB16_INSN(0x4684), /* mov ip, r0 */
2070 THUMB16_INSN(0xbc01), /* pop {r0} */
2071 THUMB16_INSN(0x4760), /* bx ip */
2072 THUMB16_INSN(0xbf00), /* nop */
2073 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2076 /* V4T Thumb -> Thumb long branch stub. Using the stack is not allowed. */
2078 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2080 THUMB16_INSN(0x4778), /* bx pc */
2081 THUMB16_INSN(0x46c0), /* nop */
2082 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2083 ARM_INSN(0xe12fff1c), /* bx ip */
2084 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2087 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not available. */
2089 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2091 THUMB16_INSN(0x4778), /* bx pc */
2092 THUMB16_INSN(0x46c0), /* nop */
2093 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2094 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2097 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2098 one, when the destination is close enough. */
2099 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2101 THUMB16_INSN(0x4778), /* bx pc */
2102 THUMB16_INSN(0x46c0), /* nop */
2103 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2106 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2107 blx to reach the stub if necessary. */
2108 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2110 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2111 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2112 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2115 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2116 blx to reach the stub if necessary. We can not add into pc;
2117 it is not guaranteed to mode switch (different in ARMv6 and ARMv7). */
2119 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2121 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2122 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2123 ARM_INSN(0xe12fff1c), /* bx ip */
2124 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2127 /* V4T ARM -> Thumb long branch stub, PIC. */
2128 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2130 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2131 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2132 ARM_INSN(0xe12fff1c), /* bx ip */
2133 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2136 /* V4T Thumb -> ARM long branch stub, PIC. */
2137 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2139 THUMB16_INSN(0x4778), /* bx pc */
2140 THUMB16_INSN(0x46c0), /* nop */
2141 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2142 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2143 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2146 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile architectures. */
2148 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2150 THUMB16_INSN(0xb401), /* push {r0} */
2151 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2152 THUMB16_INSN(0x46fc), /* mov ip, pc */
2153 THUMB16_INSN(0x4484), /* add ip, r0 */
2154 THUMB16_INSN(0xbc01), /* pop {r0} */
2155 THUMB16_INSN(0x4760), /* bx ip */
2156 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2159 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not allowed. */
2161 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2163 THUMB16_INSN(0x4778), /* bx pc */
2164 THUMB16_INSN(0x46c0), /* nop */
2165 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2166 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2167 ARM_INSN(0xe12fff1c), /* bx ip */
2168 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2171 /* Cortex-A8 erratum-workaround stubs. */
2173 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2174 can't use a conditional branch to reach this stub). */
2176 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2178 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2179 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2180 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2183 /* Stub used for b.w and bl.w instructions. */
2185 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2187 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2190 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2192 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2195 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2196 instruction (which switches to ARM mode) to point to this stub. Jump to the
2197 real destination using an ARM-mode branch. */
2199 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2201 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2204 /* Section name for stubs is the associated section name plus this string. */
2206 #define STUB_SUFFIX ".stub"
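/* For instance, stubs grouped with an input section from ".text" end up in
   a stub section named ".text.stub".  */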
2208 /* One entry per long/short branch stub defined above. */
2210 DEF_STUB(long_branch_any_any) \
2211 DEF_STUB(long_branch_v4t_arm_thumb) \
2212 DEF_STUB(long_branch_thumb_only) \
2213 DEF_STUB(long_branch_v4t_thumb_thumb) \
2214 DEF_STUB(long_branch_v4t_thumb_arm) \
2215 DEF_STUB(short_branch_v4t_thumb_arm) \
2216 DEF_STUB(long_branch_any_arm_pic) \
2217 DEF_STUB(long_branch_any_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2220 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2221 DEF_STUB(long_branch_thumb_only_pic) \
2222 DEF_STUB(a8_veneer_b_cond) \
2223 DEF_STUB(a8_veneer_b) \
2224 DEF_STUB(a8_veneer_bl) \
2225 DEF_STUB(a8_veneer_blx)
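/* The list above is expanded twice: DEF_STUB is first defined to produce an
   arm_stub_<name> enumerator, then redefined to produce a stub_definitions[]
   entry pairing each template array with its number of elements, so a stub
   type value indexes its own template directly.  */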
2227 #define DEF_STUB(x) arm_stub_##x,
2228 enum elf32_arm_stub_type {
2236 const insn_sequence* template;
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2246 struct elf32_arm_stub_hash_entry
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2251 /* The stub section. */
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2294 /* Used to build a map of a section. This is required for mixed-endian code/data. */
2297 typedef struct elf32_elf_section_map
2302 elf32_arm_section_map;
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2313 elf32_vfp11_erratum_type;
2315 typedef struct elf32_vfp11_erratum_list
2317 struct elf32_vfp11_erratum_list *next;
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2328 struct elf32_vfp11_erratum_list *branch;
2332 elf32_vfp11_erratum_type type;
2334 elf32_vfp11_erratum_list;
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2341 arm_unwind_edit_type;
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2352 struct arm_unwind_table_edit *next;
2354 arm_unwind_table_edit;
2356 typedef struct _arm_elf_section_data
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2369 /* Unwind info attached to a text section. */
2372 asection *arm_exidx_sec;
2375 /* Unwind info attached to an .ARM.exidx section. */
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2383 _arm_elf_section_data;
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2388 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so may be created multiple times: we use an array of these entries whilst
2391 relaxing which we can refresh easily, then create stubs for each potentially
2392 erratum-triggering instruction once we've settled on a solution. */
2394 struct a8_erratum_fix {
2399 unsigned long orig_insn;
2401 enum elf32_arm_stub_type stub_type;
2404 /* A table of relocs applied to branches which might trigger the Cortex-A8 erratum. */
2407 struct a8_erratum_reloc {
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2416 /* The size of the thread control block. */
2419 struct elf_arm_obj_tdata
2421 struct elf_obj_tdata root;
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_TDATA)
2445 elf32_arm_mkobject (bfd *abfd)
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2476 struct elf_link_hash_entry root;
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2504 /* A pointer to the most recently used stub hash entry against this
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2520 #define arm_stub_hash_lookup(table, string, create, copy) \
2521 ((struct elf32_arm_stub_hash_entry *) \
2522 bfd_hash_lookup ((table), (string), (create), (copy)))
2524 /* ARM ELF linker hash table. */
2525 struct elf32_arm_link_hash_table
2527 /* The main hash table. */
2528 struct elf_link_hash_table root;
2530 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2531 bfd_size_type thumb_glue_size;
2533 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2534 bfd_size_type arm_glue_size;
2536 /* The size in bytes of the section containing the ARMv4 BX veneers. */
2537 bfd_size_type bx_glue_size;
2539 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2540 veneer has been populated. */
2541 bfd_vma bx_glue_offset[15];
2543 /* The size in bytes of the section containing glue for VFP11 erratum veneers. */
2545 bfd_size_type vfp11_erratum_glue_size;
2547 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2548 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2549 elf32_arm_write_section(). */
2550 struct a8_erratum_fix *a8_erratum_fixes;
2551 unsigned int num_a8_erratum_fixes;
2553 /* An arbitrary input BFD chosen to hold the glue sections. */
2554 bfd * bfd_of_glue_owner;
2556 /* Nonzero to output a BE8 image. */
2559 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2560 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2563 /* The relocation to use for R_ARM_TARGET2 relocations. */
2566 /* 0 = Ignore R_ARM_V4BX.
2567 1 = Convert BX to MOV PC.
2568 2 = Generate v4 interworking stubs. */
2571 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2574 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2577 /* What sort of code sequences we should look for which may trigger the
2578 VFP11 denorm erratum. */
2579 bfd_arm_vfp11_fix vfp11_fix;
2581 /* Global counter for the number of fixes we have emitted. */
2582 int num_vfp11_fixes;
2584 /* Nonzero to force PIC branch veneers. */
2587 /* The number of bytes in the initial entry in the PLT. */
2588 bfd_size_type plt_header_size;
2590 /* The number of bytes in the subsequent PLT entries. */
2591 bfd_size_type plt_entry_size;
2593 /* True if the target system is VxWorks. */
2596 /* True if the target system is Symbian OS. */
2599 /* True if the target uses REL relocations. */
2602 /* Short-cuts to get to dynamic linker sections. */
2611 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2614 /* Data for R_ARM_TLS_LDM32 relocations. */
2617 bfd_signed_vma refcount;
2621 /* Small local sym cache. */
2622 struct sym_cache sym_cache;
2624 /* For convenience in allocate_dynrelocs. */
2627 /* The stub hash table. */
2628 struct bfd_hash_table stub_hash_table;
2630 /* Linker stub bfd. */
2633 /* Linker call-backs. */
2634 asection * (*add_stub_section) (const char *, asection *);
2635 void (*layout_sections_again) (void);
2637 /* Array to keep track of which stub sections have been created, and
2638 information on stub grouping. */
2641 /* This is the section to which stubs in the group will be attached. */
2644 /* The stub section. */
2648 /* Assorted information used by elf32_arm_size_stubs. */
2649 unsigned int bfd_count;
2651 asection **input_list;
2654 /* Create an entry in an ARM ELF linker hash table. */
2656 static struct bfd_hash_entry *
2657 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2658 struct bfd_hash_table * table,
2659 const char * string)
2661 struct elf32_arm_link_hash_entry * ret =
2662 (struct elf32_arm_link_hash_entry *) entry;
2664 /* Allocate the structure if it has not already been allocated by a subclass. */
2667 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2669 return (struct bfd_hash_entry *) ret;
2671 /* Call the allocation method of the superclass. */
2672 ret = ((struct elf32_arm_link_hash_entry *)
2673 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2677 ret->relocs_copied = NULL;
2678 ret->tls_type = GOT_UNKNOWN;
2679 ret->plt_thumb_refcount = 0;
2680 ret->plt_maybe_thumb_refcount = 0;
2681 ret->plt_got_offset = -1;
2682 ret->export_glue = NULL;
2684 ret->stub_cache = NULL;
2687 return (struct bfd_hash_entry *) ret;
2690 /* Initialize an entry in the stub hash table. */
2692 static struct bfd_hash_entry *
2693 stub_hash_newfunc (struct bfd_hash_entry *entry,
2694 struct bfd_hash_table *table,
2697 /* Allocate the structure if it has not already been allocated by a subclass. */
2701 entry = bfd_hash_allocate (table,
2702 sizeof (struct elf32_arm_stub_hash_entry));
2707 /* Call the allocation method of the superclass. */
2708 entry = bfd_hash_newfunc (entry, table, string);
2711 struct elf32_arm_stub_hash_entry *eh;
2713 /* Initialize the local fields. */
2714 eh = (struct elf32_arm_stub_hash_entry *) entry;
2715 eh->stub_sec = NULL;
2716 eh->stub_offset = 0;
2717 eh->target_value = 0;
2718 eh->target_section = NULL;
2719 eh->target_addend = 0;
2721 eh->stub_type = arm_stub_none;
2723 eh->stub_template = NULL;
2724 eh->stub_template_size = 0;
2727 eh->output_name = NULL;
2733 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2734 shortcuts to them in our hash table. */
2737 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2739 struct elf32_arm_link_hash_table *htab;
2741 htab = elf32_arm_hash_table (info);
2742 /* BPABI objects never have a GOT, or associated sections. */
2743 if (htab->symbian_p)
2746 if (! _bfd_elf_create_got_section (dynobj, info))
2749 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2750 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2751 if (!htab->sgot || !htab->sgotplt)
2754 htab->srelgot = bfd_get_section_by_name (dynobj,
2755 RELOC_SECTION (htab, ".got"));
2756 if (htab->srelgot == NULL)
2761 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2762 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2766 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2768 struct elf32_arm_link_hash_table *htab;
2770 htab = elf32_arm_hash_table (info);
2771 if (!htab->sgot && !create_got_section (dynobj, info))
2774 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2777 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2778 htab->srelplt = bfd_get_section_by_name (dynobj,
2779 RELOC_SECTION (htab, ".plt"));
2780 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2782 htab->srelbss = bfd_get_section_by_name (dynobj,
2783 RELOC_SECTION (htab, ".bss"));
2785 if (htab->vxworks_p)
2787 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2792 htab->plt_header_size = 0;
2793 htab->plt_entry_size
2794 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2798 htab->plt_header_size
2799 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2800 htab->plt_entry_size
2801 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2808 || (!info->shared && !htab->srelbss))
2814 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2817 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2818 struct elf_link_hash_entry *dir,
2819 struct elf_link_hash_entry *ind)
2821 struct elf32_arm_link_hash_entry *edir, *eind;
2823 edir = (struct elf32_arm_link_hash_entry *) dir;
2824 eind = (struct elf32_arm_link_hash_entry *) ind;
2826 if (eind->relocs_copied != NULL)
2828 if (edir->relocs_copied != NULL)
2830 struct elf32_arm_relocs_copied **pp;
2831 struct elf32_arm_relocs_copied *p;
2833 /* Add reloc counts against the indirect sym to the direct sym
2834 list. Merge any entries against the same section. */
2835 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2837 struct elf32_arm_relocs_copied *q;
2839 for (q = edir->relocs_copied; q != NULL; q = q->next)
2840 if (q->section == p->section)
2842 q->pc_count += p->pc_count;
2843 q->count += p->count;
2850 *pp = edir->relocs_copied;
2853 edir->relocs_copied = eind->relocs_copied;
2854 eind->relocs_copied = NULL;
2857 if (ind->root.type == bfd_link_hash_indirect)
2859 /* Copy over PLT info. */
2860 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2861 eind->plt_thumb_refcount = 0;
2862 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2863 eind->plt_maybe_thumb_refcount = 0;
2865 if (dir->got.refcount <= 0)
2867 edir->tls_type = eind->tls_type;
2868 eind->tls_type = GOT_UNKNOWN;
2872 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2875 /* Create an ARM elf linker hash table. */
2877 static struct bfd_link_hash_table *
2878 elf32_arm_link_hash_table_create (bfd *abfd)
2880 struct elf32_arm_link_hash_table *ret;
2881 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2883 ret = bfd_malloc (amt);
2887 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2888 elf32_arm_link_hash_newfunc,
2889 sizeof (struct elf32_arm_link_hash_entry)))
2896 ret->sgotplt = NULL;
2897 ret->srelgot = NULL;
2899 ret->srelplt = NULL;
2900 ret->sdynbss = NULL;
2901 ret->srelbss = NULL;
2902 ret->srelplt2 = NULL;
2903 ret->thumb_glue_size = 0;
2904 ret->arm_glue_size = 0;
2905 ret->bx_glue_size = 0;
2906 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2907 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2908 ret->vfp11_erratum_glue_size = 0;
2909 ret->num_vfp11_fixes = 0;
2910 ret->fix_cortex_a8 = 0;
2911 ret->bfd_of_glue_owner = NULL;
2912 ret->byteswap_code = 0;
2913 ret->target1_is_rel = 0;
2914 ret->target2_reloc = R_ARM_NONE;
2915 #ifdef FOUR_WORD_PLT
2916 ret->plt_header_size = 16;
2917 ret->plt_entry_size = 16;
2919 ret->plt_header_size = 20;
2920 ret->plt_entry_size = 12;
2927 ret->sym_cache.abfd = NULL;
2929 ret->tls_ldm_got.refcount = 0;
2930 ret->stub_bfd = NULL;
2931 ret->add_stub_section = NULL;
2932 ret->layout_sections_again = NULL;
2933 ret->stub_group = NULL;
2936 ret->input_list = NULL;
2938 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2939 sizeof (struct elf32_arm_stub_hash_entry)))
2945 return &ret->root.root;
2948 /* Free the derived linker hash table. */
2951 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2953 struct elf32_arm_link_hash_table *ret
2954 = (struct elf32_arm_link_hash_table *) hash;
2956 bfd_hash_table_free (&ret->stub_hash_table);
2957 _bfd_generic_link_hash_table_free (hash);
2960 /* Determine if we're dealing with a Thumb only architecture. */
2963 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2965 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2969 if (arch != TAG_CPU_ARCH_V7)
2972 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2973 Tag_CPU_arch_profile);
2975 return profile == 'M';
2978 /* Determine if we're dealing with a Thumb-2 object. */
2981 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2983 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2985 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2989 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2993 case arm_stub_long_branch_thumb_only:
2994 case arm_stub_long_branch_v4t_thumb_arm:
2995 case arm_stub_short_branch_v4t_thumb_arm:
2996 case arm_stub_long_branch_v4t_thumb_arm_pic:
2997 case arm_stub_long_branch_thumb_only_pic:
3008 /* Determine the type of stub needed, if any, for a call. */
3010 static enum elf32_arm_stub_type
3011 arm_type_of_stub (struct bfd_link_info *info,
3012 asection *input_sec,
3013 const Elf_Internal_Rela *rel,
3014 unsigned char st_type,
3015 struct elf32_arm_link_hash_entry *hash,
3016 bfd_vma destination,
3022 bfd_signed_vma branch_offset;
3023 unsigned int r_type;
3024 struct elf32_arm_link_hash_table * globals;
3027 enum elf32_arm_stub_type stub_type = arm_stub_none;
3030 /* We don't know the actual type of destination in case it is of
3031 type STT_SECTION: give up. */
3032 if (st_type == STT_SECTION)
3035 globals = elf32_arm_hash_table (info);
3037 thumb_only = using_thumb_only (globals);
3039 thumb2 = using_thumb2 (globals);
3041 /* Determine where the call point is. */
3042 location = (input_sec->output_offset
3043 + input_sec->output_section->vma
3046 branch_offset = (bfd_signed_vma)(destination - location);
3048 r_type = ELF32_R_TYPE (rel->r_info);
3050 /* Keep a simpler condition, for the sake of clarity. */
3051 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3054 /* Note when dealing with PLT entries: the main PLT stub is in
3055 ARM mode, so if the branch is in Thumb mode, another
3056 Thumb->ARM stub will be inserted later just before the ARM
3057 PLT stub. We don't take this extra distance into account
3058 here, because if a long branch stub is needed, we'll add a
3059 Thumb->Arm one and branch directly to the ARM PLT entry
3060 because it avoids spreading offset corrections in several
3064 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3066 /* Handle cases where:
3067 - this call goes too far (different Thumb/Thumb2 max
3069 - it's a Thumb->Arm call and blx is not available, or it's a
3070 Thumb->Arm branch (not bl). A stub is needed in this case,
3071 but only if this call is not through a PLT entry. Indeed,
3072 PLT stubs handle mode switching already.
3075 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3076 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3078 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3079 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3080 || ((st_type != STT_ARM_TFUNC)
3081 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3082 || (r_type == R_ARM_THM_JUMP24))
3085 if (st_type == STT_ARM_TFUNC)
3087 /* Thumb to thumb. */
3090 stub_type = (info->shared | globals->pic_veneer)
3092 ? ((globals->use_blx
3093 && (r_type == R_ARM_THM_CALL))
3094 /* V5T and above. Stub starts with ARM code, so
3095 we must be able to switch mode before
3096 reaching it, which is only possible for 'bl'
3097 (ie R_ARM_THM_CALL relocation). */
3098 ? arm_stub_long_branch_any_thumb_pic
3099 /* On V4T, use Thumb code only. */
3100 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3102 /* non-PIC stubs. */
3103 : ((globals->use_blx
3104 && (r_type == R_ARM_THM_CALL))
3105 /* V5T and above. */
3106 ? arm_stub_long_branch_any_any
3108 : arm_stub_long_branch_v4t_thumb_thumb);
3112 stub_type = (info->shared | globals->pic_veneer)
3114 ? arm_stub_long_branch_thumb_only_pic
3116 : arm_stub_long_branch_thumb_only;
3123 && sym_sec->owner != NULL
3124 && !INTERWORK_FLAG (sym_sec->owner))
3126 (*_bfd_error_handler)
3127 (_("%B(%s): warning: interworking not enabled.\n"
3128 " first occurrence: %B: Thumb call to ARM"),
3129 sym_sec->owner, input_bfd, name);
3132 stub_type = (info->shared | globals->pic_veneer)
3134 ? ((globals->use_blx
3135 && (r_type == R_ARM_THM_CALL))
3136 /* V5T and above. */
3137 ? arm_stub_long_branch_any_arm_pic
3139 : arm_stub_long_branch_v4t_thumb_arm_pic)
3141 /* non-PIC stubs. */
3142 : ((globals->use_blx
3143 && (r_type == R_ARM_THM_CALL))
3144 /* V5T and above. */
3145 ? arm_stub_long_branch_any_any
3147 : arm_stub_long_branch_v4t_thumb_arm);
3149 /* Handle v4t short branches. */
3150 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3151 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3152 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3153 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3157 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3159 if (st_type == STT_ARM_TFUNC)
3164 && sym_sec->owner != NULL
3165 && !INTERWORK_FLAG (sym_sec->owner))
3167 (*_bfd_error_handler)
3168 (_("%B(%s): warning: interworking not enabled.\n"
3169 " first occurrence: %B: ARM call to Thumb"),
3170 sym_sec->owner, input_bfd, name);
3173 /* We have an extra 2 bytes of reach because of
3174 the mode change (bit 24 (H) of BLX encoding). */
3175 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3176 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3177 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3178 || (r_type == R_ARM_JUMP24)
3179 || (r_type == R_ARM_PLT32))
3181 stub_type = (info->shared | globals->pic_veneer)
3183 ? ((globals->use_blx)
3184 /* V5T and above. */
3185 ? arm_stub_long_branch_any_thumb_pic
3187 : arm_stub_long_branch_v4t_arm_thumb_pic)
3189 /* non-PIC stubs. */
3190 : ((globals->use_blx)
3191 /* V5T and above. */
3192 ? arm_stub_long_branch_any_any
3194 : arm_stub_long_branch_v4t_arm_thumb);
3200 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3201 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3203 stub_type = (info->shared | globals->pic_veneer)
3205 ? arm_stub_long_branch_any_arm_pic
3206 /* non-PIC stubs. */
3207 : arm_stub_long_branch_any_any;
3215 /* Build a name for an entry in the stub hash table. */
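/* The name has the form "<section-id>_<symbol>+<addend>" for a global symbol
   (e.g. "00000001_printf+0", values here purely illustrative), or
   "<section-id>_<sym-sec-id>:<sym-index>+<addend>" for a local one.  */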
3218 elf32_arm_stub_name (const asection *input_section,
3219 const asection *sym_sec,
3220 const struct elf32_arm_link_hash_entry *hash,
3221 const Elf_Internal_Rela *rel)
3228 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3229 stub_name = bfd_malloc (len);
3230 if (stub_name != NULL)
3231 sprintf (stub_name, "%08x_%s+%x",
3232 input_section->id & 0xffffffff,
3233 hash->root.root.root.string,
3234 (int) rel->r_addend & 0xffffffff);
3238 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3239 stub_name = bfd_malloc (len);
3240 if (stub_name != NULL)
3241 sprintf (stub_name, "%08x_%x:%x+%x",
3242 input_section->id & 0xffffffff,
3243 sym_sec->id & 0xffffffff,
3244 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3245 (int) rel->r_addend & 0xffffffff);
3251 /* Look up an entry in the stub hash. Stub entries are cached because
3252 creating the stub name takes a bit of time. */
3254 static struct elf32_arm_stub_hash_entry *
3255 elf32_arm_get_stub_entry (const asection *input_section,
3256 const asection *sym_sec,
3257 struct elf_link_hash_entry *hash,
3258 const Elf_Internal_Rela *rel,
3259 struct elf32_arm_link_hash_table *htab)
3261 struct elf32_arm_stub_hash_entry *stub_entry;
3262 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3263 const asection *id_sec;
3265 if ((input_section->flags & SEC_CODE) == 0)
3268 /* If this input section is part of a group of sections sharing one
3269 stub section, then use the id of the first section in the group.
3270 Stub names need to include a section id, as there may well be
3271 more than one stub used to reach say, printf, and we need to
3272 distinguish between them. */
3273 id_sec = htab->stub_group[input_section->id].link_sec;
3275 if (h != NULL && h->stub_cache != NULL
3276 && h->stub_cache->h == h
3277 && h->stub_cache->id_sec == id_sec)
3279 stub_entry = h->stub_cache;
3285 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3286 if (stub_name == NULL)
3289 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3290 stub_name, FALSE, FALSE);
3292 h->stub_cache = stub_entry;
3300 /* Find or create a stub section. Returns a pointer to the stub section, and
3301 the section to which the stub section will be attached (in *LINK_SEC_P).
3302 LINK_SEC_P may be NULL. */
3305 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3306 struct elf32_arm_link_hash_table *htab)
3311 link_sec = htab->stub_group[section->id].link_sec;
3312 stub_sec = htab->stub_group[section->id].stub_sec;
3313 if (stub_sec == NULL)
3315 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3316 if (stub_sec == NULL)
3322 namelen = strlen (link_sec->name);
3323 len = namelen + sizeof (STUB_SUFFIX);
3324 s_name = bfd_alloc (htab->stub_bfd, len);
3328 memcpy (s_name, link_sec->name, namelen);
3329 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3330 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3331 if (stub_sec == NULL)
3333 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3335 htab->stub_group[section->id].stub_sec = stub_sec;
3339 *link_sec_p = link_sec;
3344 /* Add a new stub entry to the stub hash. Not all fields of the new
3345 stub entry are initialised. */
3347 static struct elf32_arm_stub_hash_entry *
3348 elf32_arm_add_stub (const char *stub_name,
3350 struct elf32_arm_link_hash_table *htab)
3354 struct elf32_arm_stub_hash_entry *stub_entry;
3356 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3357 if (stub_sec == NULL)
3360 /* Enter this entry into the linker stub hash table. */
3361 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3363 if (stub_entry == NULL)
3365 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3371 stub_entry->stub_sec = stub_sec;
3372 stub_entry->stub_offset = 0;
3373 stub_entry->id_sec = link_sec;
3378 /* Store an Arm insn into an output section not processed by
3379 elf32_arm_write_section. */
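/* When byteswap_code is set, instructions are written in the opposite byte
   order from the output BFD (e.g. little-endian code words inside a BE8
   big-endian image); otherwise they follow the output byte order.  */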
3382 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3383 bfd * output_bfd, bfd_vma val, void * ptr)
3385 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3386 bfd_putl32 (val, ptr);
3388 bfd_putb32 (val, ptr);
3391 /* Store a 16-bit Thumb insn into an output section not processed by
3392 elf32_arm_write_section. */
3395 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3396 bfd * output_bfd, bfd_vma val, void * ptr)
3398 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3399 bfd_putl16 (val, ptr);
3401 bfd_putb16 (val, ptr);
3404 static bfd_reloc_status_type elf32_arm_final_link_relocate
3405 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3406 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3407 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3410 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3414 struct elf32_arm_stub_hash_entry *stub_entry;
3415 struct bfd_link_info *info;
3416 struct elf32_arm_link_hash_table *htab;
3424 const insn_sequence *template;
3426 struct elf32_arm_link_hash_table * globals;
3427 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3428 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3431 /* Massage our args to the form they really have. */
3432 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3433 info = (struct bfd_link_info *) in_arg;
3435 globals = elf32_arm_hash_table (info);
3437 htab = elf32_arm_hash_table (info);
3438 stub_sec = stub_entry->stub_sec;
3440 /* Make a note of the offset within the stubs for this entry. */
3441 stub_entry->stub_offset = stub_sec->size;
3442 loc = stub_sec->contents + stub_entry->stub_offset;
3444 stub_bfd = stub_sec->owner;
3446 /* This is the address of the start of the stub. */
3447 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3448 + stub_entry->stub_offset;
3450 /* This is the address of the stub destination. */
3451 sym_value = (stub_entry->target_value
3452 + stub_entry->target_section->output_offset
3453 + stub_entry->target_section->output_section->vma);
3455 template = stub_entry->stub_template;
3456 template_size = stub_entry->stub_template_size;
3459 for (i = 0; i < template_size; i++)
3461 switch (template[i].type)
3465 bfd_vma data = template[i].data;
3466 if (template[i].reloc_addend != 0)
3468 /* We've borrowed the reloc_addend field to mean we should
3469 insert a condition code into this (Thumb-1 branch)
3470 instruction. See THUMB16_BCOND_INSN. */
3471 BFD_ASSERT ((data & 0xff00) == 0xd000);
3472 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3474 put_thumb_insn (globals, stub_bfd, data, loc + size);
3480 put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
3482 put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
3484 if (template[i].r_type != R_ARM_NONE)
3486 stub_reloc_idx[nrelocs] = i;
3487 stub_reloc_offset[nrelocs++] = size;
3493 put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
3494 /* Handle cases where the target is encoded within the instruction. */
3496 if (template[i].r_type == R_ARM_JUMP24)
3498 stub_reloc_idx[nrelocs] = i;
3499 stub_reloc_offset[nrelocs++] = size;
3505 bfd_put_32 (stub_bfd, template[i].data, loc + size);
3506 stub_reloc_idx[nrelocs] = i;
3507 stub_reloc_offset[nrelocs++] = size;
3517 stub_sec->size += size;
3519 /* Stub size has already been computed in arm_size_one_stub. Check consistency. */
3521 BFD_ASSERT (size == stub_entry->stub_size);
3523 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3524 if (stub_entry->st_type == STT_ARM_TFUNC)
3527 /* Assume there are at least one and at most MAXRELOCS entries to relocate in each stub. */
3529 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3531 for (i = 0; i < nrelocs; i++)
3532 if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3533 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3534 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3535 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3537 Elf_Internal_Rela rel;
3538 bfd_boolean unresolved_reloc;
3539 char *error_message;
3541 = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3542 ? STT_ARM_TFUNC : 0;
3543 bfd_vma points_to = sym_value + stub_entry->target_addend;
3545 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3546 rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
3547 rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;
3549 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3550 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3551 template should refer back to the instruction after the original branch. */
3553 points_to = sym_value;
3555 /* There may be unintended consequences if this is not true. */
3556 BFD_ASSERT (stub_entry->h == NULL);
3558 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3559 properly. We should probably use this function unconditionally,
3560 rather than only for certain relocations listed in the enclosing
3561 conditional, for the sake of consistency. */
3562 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3563 (template[stub_reloc_idx[i]].r_type),
3564 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3565 points_to, info, stub_entry->target_section, "", sym_flags,
3566 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3571 _bfd_final_link_relocate (elf32_arm_howto_from_type
3572 (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3573 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3574 sym_value + stub_entry->target_addend,
3575 template[stub_reloc_idx[i]].reloc_addend);
3582 /* Calculate the template, template size and instruction size for a stub.
3583 Return value is the instruction size. */
3586 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3587 const insn_sequence **stub_template,
3588 int *stub_template_size)
3590 const insn_sequence *template = NULL;
3591 int template_size = 0, i;
3594 template = stub_definitions[stub_type].template;
3595 template_size = stub_definitions[stub_type].template_size;
3598 for (i = 0; i < template_size; i++)
3600 switch (template[i].type)
3619 *stub_template = template;
3621 if (stub_template_size)
3622 *stub_template_size = template_size;
3627 /* As above, but don't actually build the stub. Just bump offset so
3628 we know stub section sizes. */
3631 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3634 struct elf32_arm_stub_hash_entry *stub_entry;
3635 struct elf32_arm_link_hash_table *htab;
3636 const insn_sequence *template;
3637 int template_size, size;
3639 /* Massage our args to the form they really have. */
3640 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3641 htab = (struct elf32_arm_link_hash_table *) in_arg;
3643 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3644 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3646 size = find_stub_size_and_template (stub_entry->stub_type, &template,
3649 stub_entry->stub_size = size;
3650 stub_entry->stub_template = template;
3651 stub_entry->stub_template_size = template_size;
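/* Pad the stub to a multiple of 8 bytes, which keeps the following stub
   (and any literal data words) 8-byte aligned within the stub section.  */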
3653 size = (size + 7) & ~7;
3654 stub_entry->stub_sec->size += size;
3659 /* External entry points for sizing and building linker stubs. */
3661 /* Set up various things so that we can make a list of input sections
3662 for each output section included in the link. Returns -1 on error,
3663 0 when no stubs will be needed, and 1 on success. */
3666 elf32_arm_setup_section_lists (bfd *output_bfd,
3667 struct bfd_link_info *info)
3670 unsigned int bfd_count;
3671 int top_id, top_index;
3673 asection **input_list, **list;
3675 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3677 if (! is_elf_hash_table (htab))
3680 /* Count the number of input BFDs and find the top input section id. */
3681 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3683 input_bfd = input_bfd->link_next)
3686 for (section = input_bfd->sections;
3688 section = section->next)
3690 if (top_id < section->id)
3691 top_id = section->id;
3694 htab->bfd_count = bfd_count;
3696 amt = sizeof (struct map_stub) * (top_id + 1);
3697 htab->stub_group = bfd_zmalloc (amt);
3698 if (htab->stub_group == NULL)
3701 /* We can't use output_bfd->section_count here to find the top output
3702 section index as some sections may have been removed, and
3703 _bfd_strip_section_from_output doesn't renumber the indices. */
3704 for (section = output_bfd->sections, top_index = 0;
3706 section = section->next)
3708 if (top_index < section->index)
3709 top_index = section->index;
3712 htab->top_index = top_index;
3713 amt = sizeof (asection *) * (top_index + 1);
3714 input_list = bfd_malloc (amt);
3715 htab->input_list = input_list;
3716 if (input_list == NULL)
3719 /* For sections we aren't interested in, mark their entries with a
3720 value we can check later. */
3721 list = input_list + top_index;
3723 *list = bfd_abs_section_ptr;
3724 while (list-- != input_list);
3726 for (section = output_bfd->sections;
3728 section = section->next)
3730 if ((section->flags & SEC_CODE) != 0)
3731 input_list[section->index] = NULL;
3737 /* The linker repeatedly calls this function for each input section,
3738 in the order that input sections are linked into output sections.
3739 Build lists of input sections to determine groupings between which
3740 we may insert linker stubs. */
3743 elf32_arm_next_input_section (struct bfd_link_info *info,
3746 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3748 if (isec->output_section->index <= htab->top_index)
3750 asection **list = htab->input_list + isec->output_section->index;
3752 if (*list != bfd_abs_section_ptr)
3754 /* Steal the link_sec pointer for our list. */
3755 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3756 /* This happens to make the list in reverse order,
3757 which we reverse later. */
3758 PREV_SEC (isec) = *list;
3764 /* See whether we can group stub sections together. Grouping stub
3765 sections may result in fewer stubs. More importantly, we need to
3766 put all .init* and .fini* stubs at the end of the .init or
3767 .fini output sections respectively, because glibc splits the
3768 _init and _fini functions into multiple parts. Putting a stub in
3769 the middle of a function is not a good idea. */
3772 group_sections (struct elf32_arm_link_hash_table *htab,
3773 bfd_size_type stub_group_size,
3774 bfd_boolean stubs_always_after_branch)
3776 asection **list = htab->input_list;
3780 asection *tail = *list;
3783 if (tail == bfd_abs_section_ptr)
3786 /* Reverse the list: we must avoid placing stubs at the
3787 beginning of the section because the beginning of the text
3788 section may be required for an interrupt vector in bare metal code. */
3790 #define NEXT_SEC PREV_SEC
3792 while (tail != NULL)
3794 /* Pop from tail. */
3795 asection *item = tail;
3796 tail = PREV_SEC (item);
3799 NEXT_SEC (item) = head;
3803 while (head != NULL)
3807 bfd_vma stub_group_start = head->output_offset;
3808 bfd_vma end_of_next;
3811 while (NEXT_SEC (curr) != NULL)
3813 next = NEXT_SEC (curr);
3814 end_of_next = next->output_offset + next->size;
3815 if (end_of_next - stub_group_start >= stub_group_size)
3816 /* End of NEXT is too far from start, so stop. */
3818 /* Add NEXT to the group. */
3822 /* OK, the size from the start to the start of CURR is less
3823 than stub_group_size and thus can be handled by one stub
3824 section. (Or the head section is itself larger than
3825 stub_group_size, in which case we may be toast.)
3826 We should really be keeping track of the total size of
3827 stubs added here, as stubs contribute to the final output section size. */
3831 next = NEXT_SEC (head);
3832 /* Set up this stub group. */
3833 htab->stub_group[head->id].link_sec = curr;
3835 while (head != curr && (head = next) != NULL);
3837 /* But wait, there's more! Input sections up to stub_group_size
3838 bytes after the stub section can be handled by it too. */
3839 if (!stubs_always_after_branch)
3841 stub_group_start = curr->output_offset + curr->size;
3843 while (next != NULL)
3845 end_of_next = next->output_offset + next->size;
3846 if (end_of_next - stub_group_start >= stub_group_size)
3847 /* End of NEXT is too far from stubs, so stop. */
3849 /* Add NEXT to the stub group. */
3851 next = NEXT_SEC (head);
3852 htab->stub_group[head->id].link_sec = curr;
3858 while (list++ != htab->input_list + htab->top_index);
3860 free (htab->input_list);
3865 /* Comparison function for sorting/searching relocations relating to the Cortex-A8 erratum. */
3869 a8_reloc_compare (const void *a, const void *b)
3871 const struct a8_erratum_reloc *ra = a, *rb = b;
3873 if (ra->from < rb->from)
3875 else if (ra->from > rb->from)
3881 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3882 const char *, char **);
3884 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3885 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3886 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false otherwise. */
3890 cortex_a8_erratum_scan (bfd *input_bfd,
3891 struct bfd_link_info *info,
3892 struct a8_erratum_fix **a8_fixes_p,
3893 unsigned int *num_a8_fixes_p,
3894 unsigned int *a8_fix_table_size_p,
3895 struct a8_erratum_reloc *a8_relocs,
3896 unsigned int num_a8_relocs)
3899 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3900 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3901 unsigned int num_a8_fixes = *num_a8_fixes_p;
3902 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3904 for (section = input_bfd->sections;
3906 section = section->next)
3908 bfd_byte *contents = NULL;
3909 struct _arm_elf_section_data *sec_data;
3913 if (elf_section_type (section) != SHT_PROGBITS
3914 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3915 || (section->flags & SEC_EXCLUDE) != 0
3916 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3917 || (section->output_section == bfd_abs_section_ptr))
3920 base_vma = section->output_section->vma + section->output_offset;
3922 if (elf_section_data (section)->this_hdr.contents != NULL)
3923 contents = elf_section_data (section)->this_hdr.contents;
3924 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3927 sec_data = elf32_arm_section_data (section);
3929 for (span = 0; span < sec_data->mapcount; span++)
3931 unsigned int span_start = sec_data->map[span].vma;
3932 unsigned int span_end = (span == sec_data->mapcount - 1)
3933 ? section->size : sec_data->map[span + 1].vma;
3935 char span_type = sec_data->map[span].type;
3936 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3938 if (span_type != 't')
3941 /* Span is entirely within a single 4KB region: skip scanning. */
3942 if (((base_vma + span_start) & ~0xfff)
3943 == ((base_vma + span_end) & ~0xfff))
3946 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3948 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3949 * The branch target is in the same 4KB region as the
3950 first half of the branch.
3951 * The instruction before the branch is a 32-bit
3952 length non-branch instruction. */
3953 for (i = span_start; i < span_end;)
3955 unsigned int insn = bfd_getl16 (&contents[i]);
3956 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3957 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
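/* A 32-bit Thumb-2 instruction has bits [15:13] of its first halfword set
   to 0b111 and bits [12:11] non-zero; anything else is a 16-bit insn.  */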
3959 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3964 /* Load the rest of the insn (in manual-friendly order). */
3965 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3967 /* Encoding T4: B<c>.W. */
3968 is_b = (insn & 0xf800d000) == 0xf0009000;
3969 /* Encoding T1: BL<c>.W. */
3970 is_bl = (insn & 0xf800d000) == 0xf000d000;
3971 /* Encoding T2: BLX<c>.W. */
3972 is_blx = (insn & 0xf800d000) == 0xf000c000;
3973 /* Encoding T3: B<c>.W (not permitted in IT block). */
3974 is_bcc = (insn & 0xf800d000) == 0xf0008000
3975 && (insn & 0x07f00000) != 0x03800000;
3978 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
3980 if (((base_vma + i) & 0xfff) == 0xffe
3984 && ! last_was_branch)
3986 bfd_signed_vma offset;
3987 bfd_boolean force_target_arm = FALSE;
3988 bfd_boolean force_target_thumb = FALSE;
3990 enum elf32_arm_stub_type stub_type = arm_stub_none;
3991 struct a8_erratum_reloc key, *found;
3993 key.from = base_vma + i;
3994 found = bsearch (&key, a8_relocs, num_a8_relocs,
3995 sizeof (struct a8_erratum_reloc),
4000 char *error_message = NULL;
4001 struct elf_link_hash_entry *entry;
4003 /* We don't care about the error returned from this
4004 function, only if there is glue or not. */
4005 entry = find_thumb_glue (info, found->sym_name,
4009 found->non_a8_stub = TRUE;
4011 if (found->r_type == R_ARM_THM_CALL
4012 && found->st_type != STT_ARM_TFUNC)
4013 force_target_arm = TRUE;
4014 else if (found->r_type == R_ARM_THM_CALL
4015 && found->st_type == STT_ARM_TFUNC)
4016 force_target_thumb = TRUE;
4019 /* Check if we have an offending branch instruction. */
4021 if (found && found->non_a8_stub)
4022 /* We've already made a stub for this instruction, e.g.
4023 it's a long branch or a Thumb->ARM stub. Assume that
4024 stub will suffice to work around the A8 erratum (see
4025 setting of always_after_branch above). */
4029 offset = (insn & 0x7ff) << 1;
4030 offset |= (insn & 0x3f0000) >> 4;
4031 offset |= (insn & 0x2000) ? 0x40000 : 0;
4032 offset |= (insn & 0x800) ? 0x80000 : 0;
4033 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4034 if (offset & 0x100000)
4035 offset |= ~ ((bfd_signed_vma) 0xfffff);
4036 stub_type = arm_stub_a8_veneer_b_cond;
4038 else if (is_b || is_bl || is_blx)
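/* Reconstruct the branch offset from the 32-bit encoding: S, J1 and J2
   combine with imm10/imm11 as I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S) to
   form a signed 25-bit halfword-aligned offset.  */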
4040 int s = (insn & 0x4000000) != 0;
4041 int j1 = (insn & 0x2000) != 0;
4042 int j2 = (insn & 0x800) != 0;
4046 offset = (insn & 0x7ff) << 1;
4047 offset |= (insn & 0x3ff0000) >> 4;
4051 if (offset & 0x1000000)
4052 offset |= ~ ((bfd_signed_vma) 0xffffff);
4055 offset &= ~ ((bfd_signed_vma) 3);
4057 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4058 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4061 if (stub_type != arm_stub_none)
4063 bfd_vma pc_for_insn = base_vma + i + 4;
4065 /* The original instruction is a BL, but the target is
4066 an ARM instruction. If we were not making a stub,
4067 the BL would have been converted to a BLX. Use the
4068 BLX stub instead in that case. */
4069 if (htab->use_blx && force_target_arm
4070 && stub_type == arm_stub_a8_veneer_bl)
4072 stub_type = arm_stub_a8_veneer_blx;
4076 /* Conversely, if the original instruction was
4077 BLX but the target is Thumb mode, use the BL stub instead. */
4079 else if (force_target_thumb
4080 && stub_type == arm_stub_a8_veneer_blx)
4082 stub_type = arm_stub_a8_veneer_bl;
4088 pc_for_insn &= ~ ((bfd_vma) 3);
4090 /* If we found a relocation, use the proper destination,
4091 not the offset in the (unrelocated) instruction.
4092 Note this is always done if we switched the stub type above. */
4096 (bfd_signed_vma) (found->destination - pc_for_insn);
4098 target = pc_for_insn + offset;
4100 /* The BLX stub is ARM-mode code. Adjust the offset to
4101 take the different PC value (+8 instead of +4) into account. */
4103 if (stub_type == arm_stub_a8_veneer_blx)
4106 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4110 if (num_a8_fixes == a8_fix_table_size)
4112 a8_fix_table_size *= 2;
4113 a8_fixes = bfd_realloc (a8_fixes,
4114 sizeof (struct a8_erratum_fix)
4115 * a8_fix_table_size);
4118 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4119 if (stub_name != NULL)
4120 sprintf (stub_name, "%x:%x", section->id, i);
4122 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4123 a8_fixes[num_a8_fixes].section = section;
4124 a8_fixes[num_a8_fixes].offset = i;
4125 a8_fixes[num_a8_fixes].addend = offset;
4126 a8_fixes[num_a8_fixes].orig_insn = insn;
4127 a8_fixes[num_a8_fixes].stub_name = stub_name;
4128 a8_fixes[num_a8_fixes].stub_type = stub_type;
4135 i += insn_32bit ? 4 : 2;
4136 last_was_32bit = insn_32bit;
4137 last_was_branch = is_32bit_branch;
4141 if (elf_section_data (section)->this_hdr.contents == NULL)
4145 *a8_fixes_p = a8_fixes;
4146 *num_a8_fixes_p = num_a8_fixes;
4147 *a8_fix_table_size_p = a8_fix_table_size;
4152 /* Determine and set the size of the stub section for a final link.
4154 The basic idea here is to examine all the relocations looking for
4155 PC-relative calls to a target that is unreachable with a "bl" instruction. */
4159 elf32_arm_size_stubs (bfd *output_bfd,
4161 struct bfd_link_info *info,
4162 bfd_signed_vma group_size,
4163 asection * (*add_stub_section) (const char *, asection *),
4164 void (*layout_sections_again) (void))
4166 bfd_size_type stub_group_size;
4167 bfd_boolean stubs_always_after_branch;
4168 bfd_boolean stub_changed = 0;
4169 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4170 struct a8_erratum_fix *a8_fixes = NULL;
4171 unsigned int num_a8_fixes = 0, prev_num_a8_fixes = 0, a8_fix_table_size = 10;
4172 struct a8_erratum_reloc *a8_relocs = NULL;
4173 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4175 if (htab->fix_cortex_a8)
4177 a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
4178 * a8_fix_table_size);
4179 a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
4180 * a8_reloc_table_size);
4183 /* Propagate mach to stub bfd, because it may not have been
4184 finalized when we created stub_bfd. */
4185 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4186 bfd_get_mach (output_bfd));
4188 /* Stash our params away. */
4189 htab->stub_bfd = stub_bfd;
4190 htab->add_stub_section = add_stub_section;
4191 htab->layout_sections_again = layout_sections_again;
4192 stubs_always_after_branch = group_size < 0;
4194 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4195 as the first half of a 32-bit branch straddling two 4K pages. This is a
4196 crude way of enforcing that. */
4197 if (htab->fix_cortex_a8)
4198 stubs_always_after_branch = 1;
4201 stub_group_size = -group_size;
4203 stub_group_size = group_size;
4205 if (stub_group_size == 1)
4207 /* Default values. */
4208 /* The Thumb branch range of +-4MB has to be used as the default
4209 maximum size (a given section can contain both ARM and Thumb
4210 code, so the worst case has to be taken into account).
4212 This value is 24K less than that, which allows for 2025
4213 12-byte stubs. If we exceed that, then we will fail to link.
4214 The user will have to relink with an explicit group size option. */
4216 stub_group_size = 4170000;
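/* As a rough check on the figure above: 4 MB is 4194304 bytes, and
   4194304 - 4170000 = 24304 bytes of headroom, which at 12 bytes per
   stub allows the 2025 stubs mentioned in the comment.  */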
4219 group_sections (htab, stub_group_size, stubs_always_after_branch);
4224 unsigned int bfd_indx;
4229 for (input_bfd = info->input_bfds, bfd_indx = 0;
4231 input_bfd = input_bfd->link_next, bfd_indx++)
4233 Elf_Internal_Shdr *symtab_hdr;
4235 Elf_Internal_Sym *local_syms = NULL;
4239 /* We'll need the symbol table in a second. */
4240 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4241 if (symtab_hdr->sh_info == 0)
4244 /* Walk over each section attached to the input bfd. */
4245 for (section = input_bfd->sections;
4247 section = section->next)
4249 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4251 /* If there aren't any relocs, then there's nothing more to do. */
4253 if ((section->flags & SEC_RELOC) == 0
4254 || section->reloc_count == 0
4255 || (section->flags & SEC_CODE) == 0)
4258 /* If this section is a link-once section that will be
4259 discarded, then don't create any stubs. */
4260 if (section->output_section == NULL
4261 || section->output_section->owner != output_bfd)
4264 /* Get the relocs. */
4266 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4267 NULL, info->keep_memory);
4268 if (internal_relocs == NULL)
4269 goto error_ret_free_local;
4271 /* Now examine each relocation. */
4272 irela = internal_relocs;
4273 irelaend = irela + section->reloc_count;
4274 for (; irela < irelaend; irela++)
4276 unsigned int r_type, r_indx;
4277 enum elf32_arm_stub_type stub_type;
4278 struct elf32_arm_stub_hash_entry *stub_entry;
4281 bfd_vma destination;
4282 struct elf32_arm_link_hash_entry *hash;
4283 const char *sym_name;
4285 const asection *id_sec;
4286 unsigned char st_type;
4287 bfd_boolean created_stub = FALSE;
4289 r_type = ELF32_R_TYPE (irela->r_info);
4290 r_indx = ELF32_R_SYM (irela->r_info);
4292 if (r_type >= (unsigned int) R_ARM_max)
4294 bfd_set_error (bfd_error_bad_value);
4295 error_ret_free_internal:
4296 if (elf_section_data (section)->relocs == NULL)
4297 free (internal_relocs);
4298 goto error_ret_free_local;
4301 /* Only look for stubs on branch instructions. */
4302 if ((r_type != (unsigned int) R_ARM_CALL)
4303 && (r_type != (unsigned int) R_ARM_THM_CALL)
4304 && (r_type != (unsigned int) R_ARM_JUMP24)
4305 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4306 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4307 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4308 && (r_type != (unsigned int) R_ARM_PLT32))
4311 /* Now determine the call target, its name, value, and section. */
4318 if (r_indx < symtab_hdr->sh_info)
4320 /* It's a local symbol. */
4321 Elf_Internal_Sym *sym;
4322 Elf_Internal_Shdr *hdr;
4324 if (local_syms == NULL)
4327 = (Elf_Internal_Sym *) symtab_hdr->contents;
4328 if (local_syms == NULL)
4330 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4331 symtab_hdr->sh_info, 0,
4333 if (local_syms == NULL)
4334 goto error_ret_free_internal;
4337 sym = local_syms + r_indx;
4338 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4339 sym_sec = hdr->bfd_section;
4340 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4341 sym_value = sym->st_value;
4342 destination = (sym_value + irela->r_addend
4343 + sym_sec->output_offset
4344 + sym_sec->output_section->vma);
4345 st_type = ELF_ST_TYPE (sym->st_info);
4347 = bfd_elf_string_from_elf_section (input_bfd,
4348 symtab_hdr->sh_link,
4353 /* It's an external symbol. */
4356 e_indx = r_indx - symtab_hdr->sh_info;
4357 hash = ((struct elf32_arm_link_hash_entry *)
4358 elf_sym_hashes (input_bfd)[e_indx]);
4360 while (hash->root.root.type == bfd_link_hash_indirect
4361 || hash->root.root.type == bfd_link_hash_warning)
4362 hash = ((struct elf32_arm_link_hash_entry *)
4363 hash->root.root.u.i.link);
4365 if (hash->root.root.type == bfd_link_hash_defined
4366 || hash->root.root.type == bfd_link_hash_defweak)
4368 sym_sec = hash->root.root.u.def.section;
4369 sym_value = hash->root.root.u.def.value;
4371 struct elf32_arm_link_hash_table *globals =
4372 elf32_arm_hash_table (info);
4374 /* For a destination in a shared library,
4375 use the PLT stub as target address to
4376 decide whether a branch stub is needed. */
4378 if (globals->splt != NULL && hash != NULL
4379 && hash->root.plt.offset != (bfd_vma) -1)
4381 sym_sec = globals->splt;
4382 sym_value = hash->root.plt.offset;
4383 if (sym_sec->output_section != NULL)
4384 destination = (sym_value
4385 + sym_sec->output_offset
4386 + sym_sec->output_section->vma);
4388 else if (sym_sec->output_section != NULL)
4389 destination = (sym_value + irela->r_addend
4390 + sym_sec->output_offset
4391 + sym_sec->output_section->vma);
4393 else if ((hash->root.root.type == bfd_link_hash_undefined)
4394 || (hash->root.root.type == bfd_link_hash_undefweak))
4396 /* For a shared library, use the PLT stub as
4397 target address to decide whether a long
4398 branch stub is needed.
4399 For absolute code, they cannot be handled. */
4400 struct elf32_arm_link_hash_table *globals =
4401 elf32_arm_hash_table (info);
4403 if (globals->splt != NULL && hash != NULL
4404 && hash->root.plt.offset != (bfd_vma) -1)
4406 sym_sec = globals->splt;
4407 sym_value = hash->root.plt.offset;
4408 if (sym_sec->output_section != NULL)
4409 destination = (sym_value
4410 + sym_sec->output_offset
4411 + sym_sec->output_section->vma);
4418 bfd_set_error (bfd_error_bad_value);
4419 goto error_ret_free_internal;
4421 st_type = ELF_ST_TYPE (hash->root.type);
4422 sym_name = hash->root.root.root.string;
4427 /* Determine what (if any) linker stub is needed. */
4428 stub_type = arm_type_of_stub (info, section, irela,
4430 destination, sym_sec,
4431 input_bfd, sym_name);
4432 if (stub_type == arm_stub_none)
4435 /* Support for grouping stub sections. */
4436 id_sec = htab->stub_group[section->id].link_sec;
4438 /* Get the name of this stub. */
4439 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4442 goto error_ret_free_internal;
4444 /* We've either created a stub for this reloc already,
4445 or we are about to. */
4446 created_stub = TRUE;
4448 stub_entry = arm_stub_hash_lookup
4449 (&htab->stub_hash_table, stub_name,
4451 if (stub_entry != NULL)
4453 /* The proper stub has already been created. */
4458 stub_entry = elf32_arm_add_stub (stub_name, section,
4460 if (stub_entry == NULL)
4463 goto error_ret_free_internal;
4466 stub_entry->target_value = sym_value;
4467 stub_entry->target_section = sym_sec;
4468 stub_entry->stub_type = stub_type;
4469 stub_entry->h = hash;
4470 stub_entry->st_type = st_type;
4472 if (sym_name == NULL)
4473 sym_name = "unnamed";
4474 stub_entry->output_name
4475 = bfd_alloc (htab->stub_bfd,
4476 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4477 + strlen (sym_name));
4478 if (stub_entry->output_name == NULL)
4481 goto error_ret_free_internal;
4484 /* For historical reasons, use the existing names for
4485 ARM-to-Thumb and Thumb-to-ARM stubs. */
4486 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4487 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4488 && st_type != STT_ARM_TFUNC)
4489 sprintf (stub_entry->output_name,
4490 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4491 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4492 || (r_type == (unsigned int) R_ARM_JUMP24))
4493 && st_type == STT_ARM_TFUNC)
4494 sprintf (stub_entry->output_name,
4495 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4497 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4500 stub_changed = TRUE;
4504 /* Look for relocations which might trigger the Cortex-A8 erratum. */
4506 if (htab->fix_cortex_a8
4507 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4508 || r_type == (unsigned int) R_ARM_THM_JUMP19
4509 || r_type == (unsigned int) R_ARM_THM_CALL
4510 || r_type == (unsigned int) R_ARM_THM_XPC22))
4512 bfd_vma from = section->output_section->vma
4513 + section->output_offset
4516 if ((from & 0xfff) == 0xffe)
4518 /* Found a candidate. Note we haven't checked the
4519 destination is within 4K here: if we do so (and
4520 don't create an entry in a8_relocs) we can't tell
4521 that a branch should have been relocated when scanning later. */
4523 if (num_a8_relocs == a8_reloc_table_size)
4525 a8_reloc_table_size *= 2;
4526 a8_relocs = bfd_realloc (a8_relocs,
4527 sizeof (struct a8_erratum_reloc)
4528 * a8_reloc_table_size);
4531 a8_relocs[num_a8_relocs].from = from;
4532 a8_relocs[num_a8_relocs].destination = destination;
4533 a8_relocs[num_a8_relocs].r_type = r_type;
4534 a8_relocs[num_a8_relocs].st_type = st_type;
4535 a8_relocs[num_a8_relocs].sym_name = sym_name;
4536 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4543 /* We're done with the internal relocs, free them. */
4544 if (elf_section_data (section)->relocs == NULL)
4545 free (internal_relocs);
4548 if (htab->fix_cortex_a8)
4550 /* Sort relocs which might apply to Cortex-A8 erratum. */
4551 qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc),
4554 /* Scan for branches which might trigger Cortex-A8 erratum. */
4555 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4556 &num_a8_fixes, &a8_fix_table_size,
4557 a8_relocs, num_a8_relocs) != 0)
4558 goto error_ret_free_local;
4562 if (htab->fix_cortex_a8 && num_a8_fixes != prev_num_a8_fixes)
4563 stub_changed = TRUE;
4568 /* OK, we've added some stubs. Find out the new size of the stub sections. */
4570 for (stub_sec = htab->stub_bfd->sections;
4572 stub_sec = stub_sec->next)
4574 /* Ignore non-stub sections. */
4575 if (!strstr (stub_sec->name, STUB_SUFFIX))
4581 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4583 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4584 if (htab->fix_cortex_a8)
4585 for (i = 0; i < num_a8_fixes; i++)
4587 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4588 a8_fixes[i].section, htab);
4590 if (stub_sec == NULL)
4591 goto error_ret_free_local;
4594 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4599 /* Ask the linker to do its stuff. */
4600 (*htab->layout_sections_again) ();
4601 stub_changed = FALSE;
4602 prev_num_a8_fixes = num_a8_fixes;
4605 /* Add stubs for Cortex-A8 erratum fixes now. */
4606 if (htab->fix_cortex_a8)
4608 for (i = 0; i < num_a8_fixes; i++)
4610 struct elf32_arm_stub_hash_entry *stub_entry;
4611 char *stub_name = a8_fixes[i].stub_name;
4612 asection *section = a8_fixes[i].section;
4613 unsigned int section_id = a8_fixes[i].section->id;
4614 asection *link_sec = htab->stub_group[section_id].link_sec;
4615 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4616 const insn_sequence *template;
4617 int template_size, size = 0;
4619 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4621 if (stub_entry == NULL)
4623 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4629 stub_entry->stub_sec = stub_sec;
4630 stub_entry->stub_offset = 0;
4631 stub_entry->id_sec = link_sec;
4632 stub_entry->stub_type = a8_fixes[i].stub_type;
4633 stub_entry->target_section = a8_fixes[i].section;
4634 stub_entry->target_value = a8_fixes[i].offset;
4635 stub_entry->target_addend = a8_fixes[i].addend;
4636 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4637 stub_entry->st_type = STT_ARM_TFUNC;
4639 size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
4642 stub_entry->stub_size = size;
4643 stub_entry->stub_template = template;
4644 stub_entry->stub_template_size = template_size;
4647 /* Stash the Cortex-A8 erratum fix array for use later in
4648 elf32_arm_write_section(). */
4649 htab->a8_erratum_fixes = a8_fixes;
4650 htab->num_a8_erratum_fixes = num_a8_fixes;
4654 htab->a8_erratum_fixes = NULL;
4655 htab->num_a8_erratum_fixes = 0;
4659 error_ret_free_local:
4663 /* Build all the stubs associated with the current output file. The
4664 stubs are kept in a hash table attached to the main linker hash
4665 table. We also set up the .plt entries for statically linked PIC
4666 functions here. This function is called via arm_elf_finish in the linker. */
4670 elf32_arm_build_stubs (struct bfd_link_info *info)
4673 struct bfd_hash_table *table;
4674 struct elf32_arm_link_hash_table *htab;
4676 htab = elf32_arm_hash_table (info);
4678 for (stub_sec = htab->stub_bfd->sections;
4680 stub_sec = stub_sec->next)
4684 /* Ignore non-stub sections. */
4685 if (!strstr (stub_sec->name, STUB_SUFFIX))
4688 /* Allocate memory to hold the linker stubs. */
4689 size = stub_sec->size;
4690 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4691 if (stub_sec->contents == NULL && size != 0)
4696 /* Build the stubs as directed by the stub hash table. */
4697 table = &htab->stub_hash_table;
4698 bfd_hash_traverse (table, arm_build_one_stub, info);
4703 /* Locate the Thumb encoded calling stub for NAME. */
4705 static struct elf_link_hash_entry *
4706 find_thumb_glue (struct bfd_link_info *link_info,
4708 char **error_message)
4711 struct elf_link_hash_entry *hash;
4712 struct elf32_arm_link_hash_table *hash_table;
4714 /* We need a pointer to the armelf specific hash table. */
4715 hash_table = elf32_arm_hash_table (link_info);
4717 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4718 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4720 BFD_ASSERT (tmp_name);
4722 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4724 hash = elf_link_hash_lookup
4725 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4728 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4729 tmp_name, name) == -1)
4730 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4737 /* Locate the ARM encoded calling stub for NAME. */
4739 static struct elf_link_hash_entry *
4740 find_arm_glue (struct bfd_link_info *link_info,
4742 char **error_message)
4745 struct elf_link_hash_entry *myh;
4746 struct elf32_arm_link_hash_table *hash_table;
4748 /* We need a pointer to the elfarm specific hash table. */
4749 hash_table = elf32_arm_hash_table (link_info);
4751 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4752 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4754 BFD_ASSERT (tmp_name);
4756 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4758 myh = elf_link_hash_lookup
4759 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4762 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4763 tmp_name, name) == -1)
4764 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4771 /* ARM->Thumb glue (static images):
4775 ldr r12, __func_addr
4778 .word func @ behave as if you saw an ARM_32 reloc.
4785 .word func @ behave as if you saw an ARM_32 reloc.
4787 (relocatable images)
4790 ldr r12, __func_offset
4796 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4797 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4798 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4799 static const insn32 a2t3_func_addr_insn = 0x00000001;
4801 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4802 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4803 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4805 #define ARM2THUMB_PIC_GLUE_SIZE 16
4806 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4807 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4808 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
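/* For reference, the PIC glue built from the opcodes above decodes roughly as:

     ldr  r12, [pc, #4]   @ a2t1p_ldr_insn: load the offset word below
     add  r12, r12, pc    @ a2t2p_add_pc_insn: form the absolute target address
     bx   r12             @ a2t3p_bx_r12_insn: interworking branch
     .word <pc-relative offset to the target>

   Only a PC-relative offset is stored, so the glue stays position-independent.  */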
4810 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4814 __func_from_thumb: __func_from_thumb:
4816 nop ldr r6, __func_addr
4826 #define THUMB2ARM_GLUE_SIZE 8
4827 static const insn16 t2a1_bx_pc_insn = 0x4778;
4828 static const insn16 t2a2_noop_insn = 0x46c0;
4829 static const insn32 t2a3_b_insn = 0xea000000;
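/* For reference, the Thumb->ARM glue built from the opcodes above decodes
   roughly as:

     bx   pc              @ t2a1_bx_pc_insn: switch to ARM state
     nop                  @ t2a2_noop_insn (mov r8, r8), pads to a word boundary
     b    <target>        @ t2a3_b_insn, branch offset filled in by
                          @ elf32_thumb_to_arm_stub

   "bx pc" reads the PC as the address of the word-aligned ARM branch that
   follows, so execution continues there in ARM state.  */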
4831 #define VFP11_ERRATUM_VENEER_SIZE 8
4833 #define ARM_BX_VENEER_SIZE 12
4834 static const insn32 armbx1_tst_insn = 0xe3100001;
4835 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4836 static const insn32 armbx3_bx_insn = 0xe12fff10;
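/* For reference, a BX veneer built from the opcodes above (with the register
   number folded in later by elf32_arm_bx_glue) decodes roughly as:

     tst    rN, #1        @ armbx1_tst_insn: test the Thumb bit of the target
     moveq  pc, rN        @ armbx2_moveq_insn: plain ARM branch if it is clear
     bx     rN            @ armbx3_bx_insn: otherwise interwork with BX  */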
4838 #ifndef ELFARM_NABI_C_INCLUDED
4840 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4843 bfd_byte * contents;
4847 /* Do not include empty glue sections in the output. */
4850 s = bfd_get_section_by_name (abfd, name);
4852 s->flags |= SEC_EXCLUDE;
4857 BFD_ASSERT (abfd != NULL);
4859 s = bfd_get_section_by_name (abfd, name);
4860 BFD_ASSERT (s != NULL);
4862 contents = bfd_alloc (abfd, size);
4864 BFD_ASSERT (s->size == size);
4865 s->contents = contents;
4869 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4871 struct elf32_arm_link_hash_table * globals;
4873 globals = elf32_arm_hash_table (info);
4874 BFD_ASSERT (globals != NULL);
4876 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4877 globals->arm_glue_size,
4878 ARM2THUMB_GLUE_SECTION_NAME);
4880 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4881 globals->thumb_glue_size,
4882 THUMB2ARM_GLUE_SECTION_NAME);
4884 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4885 globals->vfp11_erratum_glue_size,
4886 VFP11_ERRATUM_VENEER_SECTION_NAME);
4888 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4889 globals->bx_glue_size,
4890 ARM_BX_GLUE_SECTION_NAME);
4895 /* Allocate space and symbols for calling a Thumb function from Arm mode.
4896 Returns the symbol identifying the stub. */
4898 static struct elf_link_hash_entry *
4899 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4900 struct elf_link_hash_entry * h)
4902 const char * name = h->root.root.string;
4905 struct elf_link_hash_entry * myh;
4906 struct bfd_link_hash_entry * bh;
4907 struct elf32_arm_link_hash_table * globals;
4911 globals = elf32_arm_hash_table (link_info);
4913 BFD_ASSERT (globals != NULL);
4914 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4916 s = bfd_get_section_by_name
4917 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4919 BFD_ASSERT (s != NULL);
4921 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4923 BFD_ASSERT (tmp_name);
4925 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4927 myh = elf_link_hash_lookup
4928 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
4932 /* We've already seen this guy. */
4937 /* The only trick here is using hash_table->arm_glue_size as the value.
4938 Even though the section isn't allocated yet, this is where we will be
4939 putting it. The +1 on the value marks that the stub has not been
4940 output yet - not that it is a Thumb function. */
4942 val = globals->arm_glue_size + 1;
4943 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4944 tmp_name, BSF_GLOBAL, s, val,
4945 NULL, TRUE, FALSE, &bh);
4947 myh = (struct elf_link_hash_entry *) bh;
4948 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4949 myh->forced_local = 1;
4953 if (link_info->shared || globals->root.is_relocatable_executable
4954 || globals->pic_veneer)
4955 size = ARM2THUMB_PIC_GLUE_SIZE;
4956 else if (globals->use_blx)
4957 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
4959 size = ARM2THUMB_STATIC_GLUE_SIZE;
4962 globals->arm_glue_size += size;
4967 /* Allocate space for ARMv4 BX veneers. */
4970 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
4973 struct elf32_arm_link_hash_table *globals;
4975 struct elf_link_hash_entry *myh;
4976 struct bfd_link_hash_entry *bh;
4979 /* BX PC does not need a veneer. */
4983 globals = elf32_arm_hash_table (link_info);
4985 BFD_ASSERT (globals != NULL);
4986 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4988 /* Check if this veneer has already been allocated. */
4989 if (globals->bx_glue_offset[reg])
4992 s = bfd_get_section_by_name
4993 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
4995 BFD_ASSERT (s != NULL);
4997 /* Add symbol for veneer. */
4998 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5000 BFD_ASSERT (tmp_name);
5002 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5004 myh = elf_link_hash_lookup
5005 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5007 BFD_ASSERT (myh == NULL);
5010 val = globals->bx_glue_size;
5011 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5012 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5013 NULL, TRUE, FALSE, &bh);
5015 myh = (struct elf_link_hash_entry *) bh;
5016 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5017 myh->forced_local = 1;
5019 s->size += ARM_BX_VENEER_SIZE;
5020 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5021 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5025 /* Add an entry to the code/data map for section SEC. */
5028 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5030 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5031 unsigned int newidx;
5033 if (sec_data->map == NULL)
5035 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5036 sec_data->mapcount = 0;
5037 sec_data->mapsize = 1;
5040 newidx = sec_data->mapcount++;
5042 if (sec_data->mapcount > sec_data->mapsize)
5044 sec_data->mapsize *= 2;
5045 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5046 * sizeof (elf32_arm_section_map));
5051 sec_data->map[newidx].vma = vma;
5052 sec_data->map[newidx].type = type;
5057 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5058 veneers are handled for now. */
5061 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5062 elf32_vfp11_erratum_list *branch,
5064 asection *branch_sec,
5065 unsigned int offset)
5068 struct elf32_arm_link_hash_table *hash_table;
5070 struct elf_link_hash_entry *myh;
5071 struct bfd_link_hash_entry *bh;
5073 struct _arm_elf_section_data *sec_data;
5075 elf32_vfp11_erratum_list *newerr;
5077 hash_table = elf32_arm_hash_table (link_info);
5079 BFD_ASSERT (hash_table != NULL);
5080 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5082 s = bfd_get_section_by_name
5083 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5085 sec_data = elf32_arm_section_data (s);
5087 BFD_ASSERT (s != NULL);
5089 tmp_name = bfd_malloc ((bfd_size_type) strlen
5090 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5092 BFD_ASSERT (tmp_name);
5094 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5095 hash_table->num_vfp11_fixes);
5097 myh = elf_link_hash_lookup
5098 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5100 BFD_ASSERT (myh == NULL);
5103 val = hash_table->vfp11_erratum_glue_size;
5104 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5105 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5106 NULL, TRUE, FALSE, &bh);
5108 myh = (struct elf_link_hash_entry *) bh;
5109 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5110 myh->forced_local = 1;
5112 /* Link veneer back to calling location. */
5113 errcount = ++(sec_data->erratumcount);
5114 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5116 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5118 newerr->u.v.branch = branch;
5119 newerr->u.v.id = hash_table->num_vfp11_fixes;
5120 branch->u.b.veneer = newerr;
5122 newerr->next = sec_data->erratumlist;
5123 sec_data->erratumlist = newerr;
5125 /* A symbol for the return from the veneer. */
5126 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5127 hash_table->num_vfp11_fixes);
5129 myh = elf_link_hash_lookup
5130 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5137 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5138 branch_sec, val, NULL, TRUE, FALSE, &bh);
5140 myh = (struct elf_link_hash_entry *) bh;
5141 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5142 myh->forced_local = 1;
5146 /* Generate a mapping symbol for the veneer section, and explicitly add an
5147 entry for that symbol to the code/data map for the section. */
5148 if (hash_table->vfp11_erratum_glue_size == 0)
5151 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5152 ever requires this erratum fix. */
5153 _bfd_generic_link_add_one_symbol (link_info,
5154 hash_table->bfd_of_glue_owner, "$a",
5155 BSF_LOCAL, s, 0, NULL,
5158 myh = (struct elf_link_hash_entry *) bh;
5159 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5160 myh->forced_local = 1;
5162 /* The elf32_arm_init_maps function only cares about symbols from input
5163 BFDs. We must make a note of this generated mapping symbol
5164 ourselves so that code byteswapping works properly in
5165 elf32_arm_write_section. */
5166 elf32_arm_section_map_add (s, 'a', 0);
5169 s->size += VFP11_ERRATUM_VENEER_SIZE;
5170 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5171 hash_table->num_vfp11_fixes++;
5173 /* The offset of the veneer. */
5177 #define ARM_GLUE_SECTION_FLAGS \
5178 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5179 | SEC_READONLY | SEC_LINKER_CREATED)
5181 /* Create a fake section for use by the ARM backend of the linker. */
5184 arm_make_glue_section (bfd * abfd, const char * name)
5188 sec = bfd_get_section_by_name (abfd, name);
5193 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5196 || !bfd_set_section_alignment (abfd, sec, 2))
5199 /* Set the gc mark to prevent the section from being removed by garbage
5200 collection, despite the fact that no relocs refer to this section. */
5206 /* Add the glue sections to ABFD. This function is called from the
5207 linker scripts in ld/emultempl/{armelf}.em. */
5210 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5211 struct bfd_link_info *info)
5213 /* If we are only performing a partial
5214 link do not bother adding the glue. */
5215 if (info->relocatable)
5218 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5219 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5220 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5221 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5224 /* Select a BFD to be used to hold the sections used by the glue code.
5225 This function is called from the linker scripts in ld/emultempl/{armelf}.em. */
5229 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5231 struct elf32_arm_link_hash_table *globals;
5233 /* If we are only performing a partial link
5234 do not bother getting a bfd to hold the glue. */
5235 if (info->relocatable)
5238 /* Make sure we don't attach the glue sections to a dynamic object. */
5239 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5241 globals = elf32_arm_hash_table (info);
5243 BFD_ASSERT (globals != NULL);
5245 if (globals->bfd_of_glue_owner != NULL)
5248 /* Save the bfd for later use. */
5249 globals->bfd_of_glue_owner = abfd;
5255 check_use_blx (struct elf32_arm_link_hash_table *globals)
5257 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5259 globals->use_blx = 1;
5263 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5264 struct bfd_link_info *link_info)
5266 Elf_Internal_Shdr *symtab_hdr;
5267 Elf_Internal_Rela *internal_relocs = NULL;
5268 Elf_Internal_Rela *irel, *irelend;
5269 bfd_byte *contents = NULL;
5272 struct elf32_arm_link_hash_table *globals;
5274 /* If we are only performing a partial link do not bother
5275 to construct any glue. */
5276 if (link_info->relocatable)
5279 /* Here we have a bfd that is to be included on the link. We have a
5280 hook to do reloc rummaging, before section sizes are nailed down. */
5281 globals = elf32_arm_hash_table (link_info);
5283 BFD_ASSERT (globals != NULL);
5285 check_use_blx (globals);
5287 if (globals->byteswap_code && !bfd_big_endian (abfd))
5289 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5294 /* PR 5398: If we have not decided to include any loadable sections in
5295 the output then we will not have a glue owner bfd. This is OK, it
5296 just means that there is nothing else for us to do here. */
5297 if (globals->bfd_of_glue_owner == NULL)
5300 /* Rummage around all the relocs and map the glue vectors. */
5301 sec = abfd->sections;
5306 for (; sec != NULL; sec = sec->next)
5308 if (sec->reloc_count == 0)
5311 if ((sec->flags & SEC_EXCLUDE) != 0)
5314 symtab_hdr = & elf_symtab_hdr (abfd);
5316 /* Load the relocs. */
5318 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5320 if (internal_relocs == NULL)
5323 irelend = internal_relocs + sec->reloc_count;
5324 for (irel = internal_relocs; irel < irelend; irel++)
5327 unsigned long r_index;
5329 struct elf_link_hash_entry *h;
5331 r_type = ELF32_R_TYPE (irel->r_info);
5332 r_index = ELF32_R_SYM (irel->r_info);
5334 /* These are the only relocation types we care about. */
5335 if ( r_type != R_ARM_PC24
5336 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5339 /* Get the section contents if we haven't done so already. */
5340 if (contents == NULL)
5342 /* Get cached copy if it exists. */
5343 if (elf_section_data (sec)->this_hdr.contents != NULL)
5344 contents = elf_section_data (sec)->this_hdr.contents;
5347 /* Go get them off disk. */
5348 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5353 if (r_type == R_ARM_V4BX)
5357 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5358 record_arm_bx_glue (link_info, reg);
5362 /* If the relocation is not against a symbol it cannot concern us. */
5365 /* We don't care about local symbols. */
5366 if (r_index < symtab_hdr->sh_info)
5369 /* This is an external symbol. */
5370 r_index -= symtab_hdr->sh_info;
5371 h = (struct elf_link_hash_entry *)
5372 elf_sym_hashes (abfd)[r_index];
5374 /* If the relocation is against a static symbol it must be within
5375 the current section and so cannot be a cross ARM/Thumb relocation. */
5379 /* If the call will go through a PLT entry then we do not need glue. */
5381 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5387 /* This one is a call from arm code. We need to look up
5388 the target of the call. If it is a thumb target, we insert glue. */
5390 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5391 record_arm_to_thumb_glue (link_info, h);
5399 if (contents != NULL
5400 && elf_section_data (sec)->this_hdr.contents != contents)
5404 if (internal_relocs != NULL
5405 && elf_section_data (sec)->relocs != internal_relocs)
5406 free (internal_relocs);
5407 internal_relocs = NULL;
5413 if (contents != NULL
5414 && elf_section_data (sec)->this_hdr.contents != contents)
5416 if (internal_relocs != NULL
5417 && elf_section_data (sec)->relocs != internal_relocs)
5418 free (internal_relocs);
5425 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5428 bfd_elf32_arm_init_maps (bfd *abfd)
5430 Elf_Internal_Sym *isymbuf;
5431 Elf_Internal_Shdr *hdr;
5432 unsigned int i, localsyms;
5434 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5435 if (! is_arm_elf (abfd))
5438 if ((abfd->flags & DYNAMIC) != 0)
5441 hdr = & elf_symtab_hdr (abfd);
5442 localsyms = hdr->sh_info;
5444 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5445 should contain the number of local symbols, which should come before any
5446 global symbols. Mapping symbols are always local. */
5447 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5450 /* No internal symbols read? Skip this BFD. */
5451 if (isymbuf == NULL)
5454 for (i = 0; i < localsyms; i++)
5456 Elf_Internal_Sym *isym = &isymbuf[i];
5457 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5461 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5463 name = bfd_elf_string_from_elf_section (abfd,
5464 hdr->sh_link, isym->st_name);
5466 if (bfd_is_arm_special_symbol_name (name,
5467 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5468 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5474 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5475 say what they wanted. */
5478 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5480 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5481 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5483 if (globals->fix_cortex_a8 == -1)
5485 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5486 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5487 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5488 || out_attr[Tag_CPU_arch_profile].i == 0))
5489 globals->fix_cortex_a8 = 1;
5491 globals->fix_cortex_a8 = 0;
5497 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5499 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5500 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5502 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5503 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5505 switch (globals->vfp11_fix)
5507 case BFD_ARM_VFP11_FIX_DEFAULT:
5508 case BFD_ARM_VFP11_FIX_NONE:
5509 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5513 /* Give a warning, but do as the user requests anyway. */
5514 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5515 "workaround is not necessary for target architecture"), obfd);
5518 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5519 /* For earlier architectures, we might need the workaround, but do not
5520 enable it by default. If users are running with broken hardware, they
5521 must enable the erratum fix explicitly. */
5522 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5526 enum bfd_arm_vfp11_pipe
5534 /* Return a VFP register number. This is encoded as RX:X for single-precision
5535 registers, or X:RX for double-precision registers, where RX is the group of
5536 four bits in the instruction encoding and X is the single extension bit.
5537 RX and X fields are specified using their lowest (starting) bit. The return value is:
5540 0...31: single-precision registers s0...s31
5541 32...63: double-precision registers d0...d31.
5543 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5544 encounter VFP3 instructions, so we allow the full range for DP registers. */
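/* A rough worked example (illustrative only): with RX bits 0b0011 and X bit 1,
   the single-precision decoding gives (0x3 << 1) | 1 = 7, i.e. s7, while the
   double-precision decoding gives 32 + (0x3 | (1 << 4)) = 51, i.e. d19.  */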
5547 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5551 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5553 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5556 /* Set bits in *WMASK according to a register number REG as encoded by
5557 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5560 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5565 *wmask |= 3 << ((reg - 32) * 2);
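/* Illustratively, a write to d3 (encoded as register number 35) sets bits 6
   and 7 of the mask, i.e. the two single-precision halves s6 and s7, while a
   write to s5 sets just bit 5.  */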
5568 /* Return TRUE if WMASK overwrites anything in REGS. */
5571 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5575 for (i = 0; i < numregs; i++)
5577 unsigned int reg = regs[i];
5579 if (reg < 32 && (wmask & (1 << reg)) != 0)
5587 if ((wmask & (3 << (reg * 2))) != 0)
5594 /* In this function, we're interested in two things: finding input registers
5595 for VFP data-processing instructions, and finding the set of registers which
5596 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5597 hold the written set, so FLDM etc. are easy to deal with (we're only
5598 interested in 32 SP registers or 16 dp registers, due to the VFP version
5599 implemented by the chip in question). DP registers are marked by setting
5600 both SP registers in the write mask. */
5602 static enum bfd_arm_vfp11_pipe
5603 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5606 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5607 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5609 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5612 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5613 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5615 pqrs = ((insn & 0x00800000) >> 20)
5616 | ((insn & 0x00300000) >> 19)
5617 | ((insn & 0x00000040) >> 6);
5621 case 0: /* fmac[sd]. */
5622 case 1: /* fnmac[sd]. */
5623 case 2: /* fmsc[sd]. */
5624 case 3: /* fnmsc[sd]. */
5626 bfd_arm_vfp11_write_mask (destmask, fd);
5628 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5633 case 4: /* fmul[sd]. */
5634 case 5: /* fnmul[sd]. */
5635 case 6: /* fadd[sd]. */
5636 case 7: /* fsub[sd]. */
5640 case 8: /* fdiv[sd]. */
5643 bfd_arm_vfp11_write_mask (destmask, fd);
5644 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5649 case 15: /* extended opcode. */
5651 unsigned int extn = ((insn >> 15) & 0x1e)
5652 | ((insn >> 7) & 1);
5656 case 0: /* fcpy[sd]. */
5657 case 1: /* fabs[sd]. */
5658 case 2: /* fneg[sd]. */
5659 case 8: /* fcmp[sd]. */
5660 case 9: /* fcmpe[sd]. */
5661 case 10: /* fcmpz[sd]. */
5662 case 11: /* fcmpez[sd]. */
5663 case 16: /* fuito[sd]. */
5664 case 17: /* fsito[sd]. */
5665 case 24: /* ftoui[sd]. */
5666 case 25: /* ftouiz[sd]. */
5667 case 26: /* ftosi[sd]. */
5668 case 27: /* ftosiz[sd]. */
5669 /* These instructions will not bounce due to underflow. */
5674 case 3: /* fsqrt[sd]. */
5675 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5676 registers to cause the erratum in previous instructions. */
5677 bfd_arm_vfp11_write_mask (destmask, fd);
5681 case 15: /* fcvt{ds,sd}. */
5685 bfd_arm_vfp11_write_mask (destmask, fd);
5687 /* Only FCVTSD can underflow. */
5688 if ((insn & 0x100) != 0)
5707 /* Two-register transfer. */
5708 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5710 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5712 if ((insn & 0x100000) == 0)
5715 bfd_arm_vfp11_write_mask (destmask, fm);
5718 bfd_arm_vfp11_write_mask (destmask, fm);
5719 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5725 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5727 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5728 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5732 case 0: /* Two-reg transfer. We should catch these above. */
5735 case 2: /* fldm[sdx]. */
5739 unsigned int i, offset = insn & 0xff;
5744 for (i = fd; i < fd + offset; i++)
5745 bfd_arm_vfp11_write_mask (destmask, i);
5749 case 4: /* fld[sd]. */
5751 bfd_arm_vfp11_write_mask (destmask, fd);
5760 /* Single-register transfer. Note L==0. */
5761 else if ((insn & 0x0f100e10) == 0x0e000a10)
5763 unsigned int opcode = (insn >> 21) & 7;
5764 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5768 case 0: /* fmsr/fmdlr. */
5769 case 1: /* fmdhr. */
5770 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5771 destination register. I don't know if this is exactly right,
5772 but it is the conservative choice. */
5773 bfd_arm_vfp11_write_mask (destmask, fn);
5787 static int elf32_arm_compare_mapping (const void * a, const void * b);
5790 /* Look for potentially-troublesome code sequences which might trigger the
5791 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5792 (available from ARM) for details of the erratum. A short version is
5793 described in ld.texinfo. */
5796 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5799 bfd_byte *contents = NULL;
5801 int regs[3], numregs = 0;
5802 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5803 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5805 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5806 The states transition as follows:
5808 0 -> 1 (vector) or 0 -> 2 (scalar)
5809 A VFP FMAC-pipeline instruction has been seen. Fill
5810 regs[0]..regs[numregs-1] with its input operands. Remember this
5811 instruction in 'first_fmac'.
5814 Any instruction, except for a VFP instruction which overwrites regs[*].
5819 A VFP instruction has been seen which overwrites any of regs[*].
5820 We must make a veneer! Reset state to 0 before examining the next instruction.
5824 If we fail to match anything in state 2, reset to state 0 and reset
5825 the instruction pointer to the instruction after 'first_fmac'.
5827 If the VFP11 vector mode is in use, there must be at least two unrelated
5828 instructions between anti-dependent VFP11 instructions to properly avoid
5829 triggering the erratum, hence the use of the extra state 1. */
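/* A rough scalar-mode illustration of the FSM (hypothetical code, not taken
   from any input): "fmacs s4, s5, s6" moves from state 0 to state 2 and
   records s4/s5/s6 in regs[]; if a later "fcpys s5, s9" is then seen, it
   overwrites the input s5, the FSM enters state 3 and a veneer is recorded
   for this location.  */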
5831 /* If we are only performing a partial link do not bother
5832 to construct any glue. */
5833 if (link_info->relocatable)
5836 /* Skip if this bfd does not correspond to an ELF image. */
5837 if (! is_arm_elf (abfd))
5840 /* We should have chosen a fix type by the time we get here. */
5841 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5843 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5846 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5847 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5850 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5852 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5853 struct _arm_elf_section_data *sec_data;
5855 /* If we don't have executable progbits, we're not interested in this
5856 section. Also skip if section is to be excluded. */
5857 if (elf_section_type (sec) != SHT_PROGBITS
5858 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5859 || (sec->flags & SEC_EXCLUDE) != 0
5860 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5861 || sec->output_section == bfd_abs_section_ptr
5862 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5865 sec_data = elf32_arm_section_data (sec);
5867 if (sec_data->mapcount == 0)
5870 if (elf_section_data (sec)->this_hdr.contents != NULL)
5871 contents = elf_section_data (sec)->this_hdr.contents;
5872 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5875 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5876 elf32_arm_compare_mapping);
5878 for (span = 0; span < sec_data->mapcount; span++)
5880 unsigned int span_start = sec_data->map[span].vma;
5881 unsigned int span_end = (span == sec_data->mapcount - 1)
5882 ? sec->size : sec_data->map[span + 1].vma;
5883 char span_type = sec_data->map[span].type;
5885 /* FIXME: Only ARM mode is supported at present. We may need to
5886 support Thumb-2 mode also at some point. */
5887 if (span_type != 'a')
5890 for (i = span_start; i < span_end;)
5892 unsigned int next_i = i + 4;
5893 unsigned int insn = bfd_big_endian (abfd)
5894 ? (contents[i] << 24)
5895 | (contents[i + 1] << 16)
5896 | (contents[i + 2] << 8)
5898 : (contents[i + 3] << 24)
5899 | (contents[i + 2] << 16)
5900 | (contents[i + 1] << 8)
5902 unsigned int writemask = 0;
5903 enum bfd_arm_vfp11_pipe pipe;
5908 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5910 /* I'm assuming the VFP11 erratum can trigger with denorm
5911 operands on either the FMAC or the DS pipeline. This might
5912 lead to slightly overenthusiastic veneer insertion. */
5913 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
5915 state = use_vector ? 1 : 2;
5917 veneer_of_insn = insn;
5923 int other_regs[3], other_numregs;
5924 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5927 if (pipe != VFP11_BAD
5928 && bfd_arm_vfp11_antidependency (writemask, regs,
5938 int other_regs[3], other_numregs;
5939 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5942 if (pipe != VFP11_BAD
5943 && bfd_arm_vfp11_antidependency (writemask, regs,
5949 next_i = first_fmac + 4;
5955 abort (); /* Should be unreachable. */
5960 elf32_vfp11_erratum_list *newerr
5961 = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5964 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
5966 newerr->u.b.vfp_insn = veneer_of_insn;
5971 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
5978 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
5983 newerr->next = sec_data->erratumlist;
5984 sec_data->erratumlist = newerr;
5993 if (contents != NULL
5994 && elf_section_data (sec)->this_hdr.contents != contents)
6002 if (contents != NULL
6003 && elf_section_data (sec)->this_hdr.contents != contents)
6009 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6010 after sections have been laid out, using specially-named symbols. */
6013 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6014 struct bfd_link_info *link_info)
6017 struct elf32_arm_link_hash_table *globals;
6020 if (link_info->relocatable)
6023 /* Skip if this bfd does not correspond to an ELF image. */
6024 if (! is_arm_elf (abfd))
6027 globals = elf32_arm_hash_table (link_info);
6029 tmp_name = bfd_malloc ((bfd_size_type) strlen
6030 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6032 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6034 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6035 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6037 for (; errnode != NULL; errnode = errnode->next)
6039 struct elf_link_hash_entry *myh;
6042 switch (errnode->type)
6044 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6045 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6046 /* Find veneer symbol. */
6047 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6048 errnode->u.b.veneer->u.v.id);
6050 myh = elf_link_hash_lookup
6051 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6054 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6055 "`%s'"), abfd, tmp_name);
6057 vma = myh->root.u.def.section->output_section->vma
6058 + myh->root.u.def.section->output_offset
6059 + myh->root.u.def.value;
6061 errnode->u.b.veneer->vma = vma;
6064 case VFP11_ERRATUM_ARM_VENEER:
6065 case VFP11_ERRATUM_THUMB_VENEER:
6066 /* Find return location. */
6067 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6070 myh = elf_link_hash_lookup
6071 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6074 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6075 "`%s'"), abfd, tmp_name);
6077 vma = myh->root.u.def.section->output_section->vma
6078 + myh->root.u.def.section->output_offset
6079 + myh->root.u.def.value;
6081 errnode->u.v.branch->vma = vma;
6094 /* Set target relocation values needed during linking. */
6097 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6098 struct bfd_link_info *link_info,
6100 char * target2_type,
6103 bfd_arm_vfp11_fix vfp11_fix,
6104 int no_enum_warn, int no_wchar_warn,
6105 int pic_veneer, int fix_cortex_a8)
6107 struct elf32_arm_link_hash_table *globals;
6109 globals = elf32_arm_hash_table (link_info);
6111 globals->target1_is_rel = target1_is_rel;
6112 if (strcmp (target2_type, "rel") == 0)
6113 globals->target2_reloc = R_ARM_REL32;
6114 else if (strcmp (target2_type, "abs") == 0)
6115 globals->target2_reloc = R_ARM_ABS32;
6116 else if (strcmp (target2_type, "got-rel") == 0)
6117 globals->target2_reloc = R_ARM_GOT_PREL;
6120 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6123 globals->fix_v4bx = fix_v4bx;
6124 globals->use_blx |= use_blx;
6125 globals->vfp11_fix = vfp11_fix;
6126 globals->pic_veneer = pic_veneer;
6127 globals->fix_cortex_a8 = fix_cortex_a8;
6129 BFD_ASSERT (is_arm_elf (output_bfd));
6130 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6131 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6134 /* Replace the target offset of a Thumb bl or b.w instruction. */
6137 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6143 BFD_ASSERT ((offset & 1) == 0);
6145 upper = bfd_get_16 (abfd, insn);
6146 lower = bfd_get_16 (abfd, insn + 2);
6147 reloc_sign = (offset < 0) ? 1 : 0;
6148 upper = (upper & ~(bfd_vma) 0x7ff)
6149 | ((offset >> 12) & 0x3ff)
6150 | (reloc_sign << 10);
6151 lower = (lower & ~(bfd_vma) 0x2fff)
6152 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6153 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6154 | ((offset >> 1) & 0x7ff);
6155 bfd_put_16 (abfd, upper, insn);
6156 bfd_put_16 (abfd, lower, insn + 2);
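/* The field layout above matches the 32-bit Thumb BL/B.W encoding: imm10 and
   the sign bit S live in the first halfword, imm11 in the second, and the two
   J bits are derived so that J = NOT(I) XOR S, where I1/I2 are bits 23/22 of
   the byte offset.  For instance, a forward offset of 0x1000 gives S = 0,
   imm10 = 1, imm11 = 0 and J1 = J2 = 1.  */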
6159 /* Thumb code calling an ARM function. */
6162 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6166 asection * input_section,
6167 bfd_byte * hit_data,
6170 bfd_signed_vma addend,
6172 char **error_message)
6176 long int ret_offset;
6177 struct elf_link_hash_entry * myh;
6178 struct elf32_arm_link_hash_table * globals;
6180 myh = find_thumb_glue (info, name, error_message);
6184 globals = elf32_arm_hash_table (info);
6186 BFD_ASSERT (globals != NULL);
6187 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6189 my_offset = myh->root.u.def.value;
6191 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6192 THUMB2ARM_GLUE_SECTION_NAME);
6194 BFD_ASSERT (s != NULL);
6195 BFD_ASSERT (s->contents != NULL);
6196 BFD_ASSERT (s->output_section != NULL);
6198 if ((my_offset & 0x01) == 0x01)
6201 && sym_sec->owner != NULL
6202 && !INTERWORK_FLAG (sym_sec->owner))
6204 (*_bfd_error_handler)
6205 (_("%B(%s): warning: interworking not enabled.\n"
6206 " first occurrence: %B: thumb call to arm"),
6207 sym_sec->owner, input_bfd, name);
6213 myh->root.u.def.value = my_offset;
6215 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6216 s->contents + my_offset);
6218 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6219 s->contents + my_offset + 2);
6222 /* Address of destination of the stub. */
6223 ((bfd_signed_vma) val)
6225 /* Offset from the start of the current section
6226 to the start of the stubs. */
6228 /* Offset of the start of this stub from the start of the stubs. */
6230 /* Address of the start of the current section. */
6231 + s->output_section->vma)
6232 /* The branch instruction is 4 bytes into the stub. */
6234 /* ARM branches work from the pc of the instruction + 8. */
6237 put_arm_insn (globals, output_bfd,
6238 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6239 s->contents + my_offset + 4);
6242 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6244 /* Now go back and fix up the original BL insn to point to here. */
6246 /* Address of where the stub is located. */
6247 (s->output_section->vma + s->output_offset + my_offset)
6248 /* Address of where the BL is located. */
6249 - (input_section->output_section->vma + input_section->output_offset
6251 /* Addend in the relocation. */
6253 /* Biassing for PC-relative addressing. */
6256 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6261 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6263 static struct elf_link_hash_entry *
6264 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6271 char ** error_message)
6274 long int ret_offset;
6275 struct elf_link_hash_entry * myh;
6276 struct elf32_arm_link_hash_table * globals;
6278 myh = find_arm_glue (info, name, error_message);
6282 globals = elf32_arm_hash_table (info);
6284 BFD_ASSERT (globals != NULL);
6285 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6287 my_offset = myh->root.u.def.value;
6289 if ((my_offset & 0x01) == 0x01)
6292 && sym_sec->owner != NULL
6293 && !INTERWORK_FLAG (sym_sec->owner))
6295 (*_bfd_error_handler)
6296 (_("%B(%s): warning: interworking not enabled.\n"
6297 " first occurrence: %B: arm call to thumb"),
6298 sym_sec->owner, input_bfd, name);
6302 myh->root.u.def.value = my_offset;
6304 if (info->shared || globals->root.is_relocatable_executable
6305 || globals->pic_veneer)
6307 /* For relocatable objects we can't use absolute addresses,
6308 so construct the address from a relative offset. */
6309 /* TODO: If the offset is small it's probably worth
6310 constructing the address with adds. */
6311 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6312 s->contents + my_offset);
6313 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6314 s->contents + my_offset + 4);
6315 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6316 s->contents + my_offset + 8);
6317 /* Adjust the offset by 4 for the position of the add,
6318 and 8 for the pipeline offset. */
6319 ret_offset = (val - (s->output_offset
6320 + s->output_section->vma
6323 bfd_put_32 (output_bfd, ret_offset,
6324 s->contents + my_offset + 12);
6326 else if (globals->use_blx)
6328 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6329 s->contents + my_offset);
6331 /* It's a thumb address. Add the low order bit. */
6332 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6333 s->contents + my_offset + 4);
6337 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6338 s->contents + my_offset);
6340 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6341 s->contents + my_offset + 4);
6343 /* It's a thumb address. Add the low order bit. */
6344 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6345 s->contents + my_offset + 8);
6351 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6356 /* Arm code calling a Thumb function. */
6359 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6363 asection * input_section,
6364 bfd_byte * hit_data,
6367 bfd_signed_vma addend,
6369 char **error_message)
6371 unsigned long int tmp;
6374 long int ret_offset;
6375 struct elf_link_hash_entry * myh;
6376 struct elf32_arm_link_hash_table * globals;
6378 globals = elf32_arm_hash_table (info);
6380 BFD_ASSERT (globals != NULL);
6381 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6383 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6384 ARM2THUMB_GLUE_SECTION_NAME);
6385 BFD_ASSERT (s != NULL);
6386 BFD_ASSERT (s->contents != NULL);
6387 BFD_ASSERT (s->output_section != NULL);
6389 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6390 sym_sec, val, s, error_message);
6394 my_offset = myh->root.u.def.value;
6395 tmp = bfd_get_32 (input_bfd, hit_data);
6396 tmp = tmp & 0xFF000000;
6398 /* Somehow these are both 4 too far, so subtract 8. */
6399 ret_offset = (s->output_offset
6401 + s->output_section->vma
6402 - (input_section->output_offset
6403 + input_section->output_section->vma
6407 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6409 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6414 /* Populate Arm stub for an exported Thumb function. */
6417 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6419 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6421 struct elf_link_hash_entry * myh;
6422 struct elf32_arm_link_hash_entry *eh;
6423 struct elf32_arm_link_hash_table * globals;
6426 char *error_message;
6428 eh = elf32_arm_hash_entry (h);
6429 /* Allocate stubs for exported Thumb functions on v4t. */
6430 if (eh->export_glue == NULL)
6433 globals = elf32_arm_hash_table (info);
6435 BFD_ASSERT (globals != NULL);
6436 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6438 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6439 ARM2THUMB_GLUE_SECTION_NAME);
6440 BFD_ASSERT (s != NULL);
6441 BFD_ASSERT (s->contents != NULL);
6442 BFD_ASSERT (s->output_section != NULL);
6444 sec = eh->export_glue->root.u.def.section;
6446 BFD_ASSERT (sec->output_section != NULL);
6448 val = eh->export_glue->root.u.def.value + sec->output_offset
6449 + sec->output_section->vma;
6451 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6452 h->root.u.def.section->owner,
6453 globals->obfd, sec, val, s,
6459 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6462 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6467 struct elf32_arm_link_hash_table *globals;
6469 globals = elf32_arm_hash_table (info);
6471 BFD_ASSERT (globals != NULL);
6472 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6474 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6475 ARM_BX_GLUE_SECTION_NAME);
6476 BFD_ASSERT (s != NULL);
6477 BFD_ASSERT (s->contents != NULL);
6478 BFD_ASSERT (s->output_section != NULL);
6480 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6482 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6484 if ((globals->bx_glue_offset[reg] & 1) == 0)
6486 p = s->contents + glue_addr;
6487 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6488 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6489 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6490 globals->bx_glue_offset[reg] |= 1;
6493 return glue_addr + s->output_section->vma + s->output_offset;
6496 /* Generate Arm stubs for exported Thumb symbols. */
6498 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6499 struct bfd_link_info *link_info)
6501 struct elf32_arm_link_hash_table * globals;
6503 if (link_info == NULL)
6504 /* Ignore this if we are not called by the ELF backend linker. */
6507 globals = elf32_arm_hash_table (link_info);
6508 /* If blx is available then exported Thumb symbols are OK and there is nothing to do. */
6510 if (globals->use_blx)
6513 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6517 /* Some relocations map to different relocations depending on the
6518 target. Return the real relocation. */
6521 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6527 if (globals->target1_is_rel)
6533 return globals->target2_reloc;
6540 /* Return the base VMA address which should be subtracted from real addresses
6541 when resolving @dtpoff relocation.
6542 This is PT_TLS segment p_vaddr. */
6545 dtpoff_base (struct bfd_link_info *info)
6547 /* If tls_sec is NULL, we should have signalled an error already. */
6548 if (elf_hash_table (info)->tls_sec == NULL)
6550 return elf_hash_table (info)->tls_sec->vma;
6553 /* Return the relocation value for @tpoff relocation
6554 if STT_TLS virtual address is ADDRESS. */
6557 tpoff (struct bfd_link_info *info, bfd_vma address)
6559 struct elf_link_hash_table *htab = elf_hash_table (info);
6562 /* If tls_sec is NULL, we should have signalled an error already. */
6563 if (htab->tls_sec == NULL)
6565 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
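  /* ARM uses the variant 1 TLS layout: the thread pointer addresses the
     thread control block (TCB_SIZE bytes) and the first TLS block follows
     it at the segment's alignment, so the TP-relative offset is the offset
     into the PT_TLS segment plus the aligned TCB size.  */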
6566 return address - htab->tls_sec->vma + base;
6569 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6570 VALUE is the relocation value. */
6572 static bfd_reloc_status_type
6573 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6576 return bfd_reloc_overflow;
6578 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6579 bfd_put_32 (abfd, value, data);
6580 return bfd_reloc_ok;
6583 /* For a given value of n, calculate the value of G_n as required to
6584 deal with group relocations. We return it in the form of an
6585 encoded constant-and-rotation, together with the final residual. If n is
6586 specified as less than zero, then final_residual is filled with the
6587 input value and no further action is performed. */
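/* For illustration, splitting the value 0x12345678 gives
   G_0 = 0x12000000 (encoded as 0x548: constant 0x48 rotated right by 10),
   leaving 0x00345678, then G_1 = 0x00344000 leaving 0x00001678, then
   G_2 = 0x00001640, with a final residual of 0x38.  */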
6590 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6594 bfd_vma encoded_g_n = 0;
6595 bfd_vma residual = value; /* Also known as Y_n. */
6597 for (current_n = 0; current_n <= n; current_n++)
6601 /* Calculate which part of the value to mask. */
6608 /* Determine the most significant bit in the residual and
6609 align the resulting value to a 2-bit boundary. */
6610 for (msb = 30; msb >= 0; msb -= 2)
6611 if (residual & (3 << msb))
6614 /* The desired shift is now (msb - 6), or zero, whichever
6621 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6622 g_n = residual & (0xff << shift);
6623 encoded_g_n = (g_n >> shift)
6624 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6626 /* Calculate the residual for the next time around. */
6630 *final_residual = residual;
6635 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6636 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6639 identify_add_or_sub (bfd_vma insn)
6641 int opcode = insn & 0x1e00000;
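  /* Bits 24-21 of a data-processing instruction hold the opcode: ADD is
     0b0100 (so the masked value equals 1 << 23) and SUB is 0b0010
     (1 << 22).  */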
6643 if (opcode == 1 << 23) /* ADD */
6646 if (opcode == 1 << 22) /* SUB */
6652 /* Perform a relocation as part of a final link. */
6654 static bfd_reloc_status_type
6655 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6658 asection * input_section,
6659 bfd_byte * contents,
6660 Elf_Internal_Rela * rel,
6662 struct bfd_link_info * info,
6664 const char * sym_name,
6666 struct elf_link_hash_entry * h,
6667 bfd_boolean * unresolved_reloc_p,
6668 char ** error_message)
6670 unsigned long r_type = howto->type;
6671 unsigned long r_symndx;
6672 bfd_byte * hit_data = contents + rel->r_offset;
6673 bfd * dynobj = NULL;
6674 Elf_Internal_Shdr * symtab_hdr;
6675 struct elf_link_hash_entry ** sym_hashes;
6676 bfd_vma * local_got_offsets;
6677 asection * sgot = NULL;
6678 asection * splt = NULL;
6679 asection * sreloc = NULL;
6681 bfd_signed_vma signed_addend;
6682 struct elf32_arm_link_hash_table * globals;
6684 globals = elf32_arm_hash_table (info);
6686 BFD_ASSERT (is_arm_elf (input_bfd));
6688 /* Some relocation types map to different relocations depending on the
6689 target. We pick the right one here. */
6690 r_type = arm_real_reloc_type (globals, r_type);
6691 if (r_type != howto->type)
6692 howto = elf32_arm_howto_from_type (r_type);
6694 /* If the start address has been set, then set the EF_ARM_HASENTRY
6695 flag. Setting this more than once is redundant, but the cost is
6696 not too high, and it keeps the code simple.
6698 The test is done here, rather than somewhere else, because the
6699 start address is only set just before the final link commences.
6701 Note - if the user deliberately sets a start address of 0, the
6702 flag will not be set. */
6703 if (bfd_get_start_address (output_bfd) != 0)
6704 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6706 dynobj = elf_hash_table (info)->dynobj;
6709 sgot = bfd_get_section_by_name (dynobj, ".got");
6710 splt = bfd_get_section_by_name (dynobj, ".plt");
6712 symtab_hdr = & elf_symtab_hdr (input_bfd);
6713 sym_hashes = elf_sym_hashes (input_bfd);
6714 local_got_offsets = elf_local_got_offsets (input_bfd);
6715 r_symndx = ELF32_R_SYM (rel->r_info);
6717 if (globals->use_rel)
6719 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6721 if (addend & ((howto->src_mask + 1) >> 1))
6724 signed_addend &= ~ howto->src_mask;
6725 signed_addend |= addend;
6728 signed_addend = addend;
6731 addend = signed_addend = rel->r_addend;
6736 /* We don't need to find a value for this symbol. It's just a marker. */
6738 *unresolved_reloc_p = FALSE;
6739 return bfd_reloc_ok;
6742 if (!globals->vxworks_p)
6743 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6747 case R_ARM_ABS32_NOI:
6749 case R_ARM_REL32_NOI:
6755 /* Handle relocations which should use the PLT entry. ABS32/REL32
6756 will use the symbol's value, which may point to a PLT entry, but we
6757 don't need to handle that here. If we created a PLT entry, all
6758 branches in this object should go to it, except if the PLT is too
6759 far away, in which case a long branch stub should be inserted. */
6760 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6761 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6762 && r_type != R_ARM_CALL
6763 && r_type != R_ARM_JUMP24
6764 && r_type != R_ARM_PLT32)
6767 && h->plt.offset != (bfd_vma) -1)
6769 /* If we've created a .plt section, and assigned a PLT entry to
6770 this function, it should not be known to bind locally. If
6771 it were, we would have cleared the PLT entry. */
6772 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6774 value = (splt->output_section->vma
6775 + splt->output_offset
6777 *unresolved_reloc_p = FALSE;
6778 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6779 contents, rel->r_offset, value,
6783 /* When generating a shared object or relocatable executable, these
6784 relocations are copied into the output file to be resolved at run time. */
6786 if ((info->shared || globals->root.is_relocatable_executable)
6787 && (input_section->flags & SEC_ALLOC)
6788 && !(elf32_arm_hash_table (info)->vxworks_p
6789 && strcmp (input_section->output_section->name,
6791 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6792 || !SYMBOL_CALLS_LOCAL (info, h))
6794 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6795 || h->root.type != bfd_link_hash_undefweak)
6796 && r_type != R_ARM_PC24
6797 && r_type != R_ARM_CALL
6798 && r_type != R_ARM_JUMP24
6799 && r_type != R_ARM_PREL31
6800 && r_type != R_ARM_PLT32)
6802 Elf_Internal_Rela outrel;
6804 bfd_boolean skip, relocate;
6806 *unresolved_reloc_p = FALSE;
6810 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6811 ! globals->use_rel);
6814 return bfd_reloc_notsupported;
6820 outrel.r_addend = addend;
6822 _bfd_elf_section_offset (output_bfd, info, input_section,
6824 if (outrel.r_offset == (bfd_vma) -1)
6826 else if (outrel.r_offset == (bfd_vma) -2)
6827 skip = TRUE, relocate = TRUE;
6828 outrel.r_offset += (input_section->output_section->vma
6829 + input_section->output_offset);
6832 memset (&outrel, 0, sizeof outrel);
6837 || !h->def_regular))
6838 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6843 /* This symbol is local, or marked to become local. */
6844 if (sym_flags == STT_ARM_TFUNC)
6846 if (globals->symbian_p)
6850 /* On Symbian OS, the data segment and text segment
6851 can be relocated independently. Therefore, we
6852 must indicate the segment to which this
6853 relocation is relative. The BPABI allows us to
6854 use any symbol in the right segment; we just use
6855 the section symbol as it is convenient. (We
6856 cannot use the symbol given by "h" directly as it
6857 will not appear in the dynamic symbol table.)
6859 Note that the dynamic linker ignores the section
6860 symbol value, so we don't subtract osec->vma
6861 from the emitted reloc addend. */
6863 osec = sym_sec->output_section;
6865 osec = input_section->output_section;
6866 symbol = elf_section_data (osec)->dynindx;
6869 struct elf_link_hash_table *htab = elf_hash_table (info);
6871 if ((osec->flags & SEC_READONLY) == 0
6872 && htab->data_index_section != NULL)
6873 osec = htab->data_index_section;
6875 osec = htab->text_index_section;
6876 symbol = elf_section_data (osec)->dynindx;
6878 BFD_ASSERT (symbol != 0);
6881 /* On SVR4-ish systems, the dynamic loader cannot
6882 relocate the text and data segments independently,
6883 so the symbol does not matter. */
6885 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6886 if (globals->use_rel)
6889 outrel.r_addend += value;
6892 loc = sreloc->contents;
6893 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6894 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6896 /* If this reloc is against an external symbol, we do not want to
6897 fiddle with the addend. Otherwise, we need to include the symbol
6898 value so that it becomes an addend for the dynamic reloc. */
6900 return bfd_reloc_ok;
6902 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6903 contents, rel->r_offset, value,
6906 else switch (r_type)
6909 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6911 case R_ARM_XPC25: /* Arm BLX instruction. */
6914 case R_ARM_PC24: /* Arm B/BL instruction. */
6918 bfd_signed_vma branch_offset;
6919 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6921 if (r_type == R_ARM_XPC25)
6923 /* Check for Arm calling Arm function. */
6924 /* FIXME: Should we translate the instruction into a BL
6925 instruction instead ? */
6926 if (sym_flags != STT_ARM_TFUNC)
6927 (*_bfd_error_handler)
6928 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
6930 h ? h->root.root.string : "(local)");
6932 else if (r_type == R_ARM_PC24)
6934 /* Check for Arm calling Thumb function. */
6935 if (sym_flags == STT_ARM_TFUNC)
6937 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
6938 output_bfd, input_section,
6939 hit_data, sym_sec, rel->r_offset,
6940 signed_addend, value,
6942 return bfd_reloc_ok;
6944 return bfd_reloc_dangerous;
6948 /* Check if a stub has to be inserted because the
6949 destination is too far or we are changing mode. */
6950 if ( r_type == R_ARM_CALL
6951 || r_type == R_ARM_JUMP24
6952 || r_type == R_ARM_PLT32)
6954 /* If the call goes through a PLT entry, make sure to
6955 check distance to the right destination address. */
6956 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
6958 value = (splt->output_section->vma
6959 + splt->output_offset
6961 *unresolved_reloc_p = FALSE;
6964 from = (input_section->output_section->vma
6965 + input_section->output_offset
6967 branch_offset = (bfd_signed_vma)(value - from);
6969 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
6970 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
6971 || ((sym_flags == STT_ARM_TFUNC)
6972 && (((r_type == R_ARM_CALL) && !globals->use_blx)
6973 || (r_type == R_ARM_JUMP24)
6974 || (r_type == R_ARM_PLT32) ))
6977 /* The target is out of reach, so redirect the
6978 branch to the local stub for this function. */
6980 stub_entry = elf32_arm_get_stub_entry (input_section,
6983 if (stub_entry != NULL)
6984 value = (stub_entry->stub_offset
6985 + stub_entry->stub_sec->output_offset
6986 + stub_entry->stub_sec->output_section->vma);
6990 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
6992 S is the address of the symbol in the relocation.
6993 P is address of the instruction being relocated.
6994 A is the addend (extracted from the instruction) in bytes.
6996 S is held in 'value'.
6997 P is the base address of the section containing the
6998 instruction plus the offset of the reloc into that
7000 (input_section->output_section->vma +
7001 input_section->output_offset +
7003 A is the addend, converted into bytes, ie:
7006 Note: None of these operations have knowledge of the pipeline
7007 size of the processor, thus it is up to the assembler to
7008 encode this information into the addend. */
7009 value -= (input_section->output_section->vma
7010 + input_section->output_offset);
7011 value -= rel->r_offset;
7012 if (globals->use_rel)
7013 value += (signed_addend << howto->size);
7015 /* RELA addends do not have to be adjusted by howto->size. */
7016 value += signed_addend;
7018 signed_addend = value;
7019 signed_addend >>= howto->rightshift;
7021 /* A branch to an undefined weak symbol is turned into a jump to
7022 the next instruction unless a PLT entry will be created. */
7023 if (h && h->root.type == bfd_link_hash_undefweak
7024 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7026 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
7031 /* Perform a signed range check. */
7032 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7033 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7034 return bfd_reloc_overflow;
7036 addend = (value & 2);
7038 value = (signed_addend & howto->dst_mask)
7039 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7041 if (r_type == R_ARM_CALL)
7043 /* Set the H bit in the BLX instruction. */
7044 if (sym_flags == STT_ARM_TFUNC)
7049 value &= ~(bfd_vma)(1 << 24);
7052 /* Select the correct instruction (BL or BLX). */
7053 /* Only if we are not handling a BL to a stub. In this
7054 case, mode switching is performed by the stub. */
7055 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7059 value &= ~(bfd_vma)(1 << 28);
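	      /* (BL keeps a condition code in bits 31-28 and its link bit
		 in bit 24, whereas the immediate form of BLX has 0b1111 in
		 bits 31-28 and reuses bit 24 as the H bit, i.e. bit 1 of
		 the target address.  Setting or clearing bit 28 of an
		 unconditional BL therefore switches between the two
		 encodings, which is what the manipulation above does.)  */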
7069 if (sym_flags == STT_ARM_TFUNC)
7073 case R_ARM_ABS32_NOI:
7079 if (sym_flags == STT_ARM_TFUNC)
7081 value -= (input_section->output_section->vma
7082 + input_section->output_offset + rel->r_offset);
7085 case R_ARM_REL32_NOI:
7087 value -= (input_section->output_section->vma
7088 + input_section->output_offset + rel->r_offset);
7092 value -= (input_section->output_section->vma
7093 + input_section->output_offset + rel->r_offset);
7094 value += signed_addend;
7095 if (! h || h->root.type != bfd_link_hash_undefweak)
7097 /* Check for overflow: the result must fit in a signed 31-bit field, i.e. bits 30 and 31 of the value must be equal. */
7098 if ((value ^ (value >> 1)) & (1 << 30))
7099 return bfd_reloc_overflow;
7101 value &= 0x7fffffff;
7102 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7103 if (sym_flags == STT_ARM_TFUNC)
7108 bfd_put_32 (input_bfd, value, hit_data);
7109 return bfd_reloc_ok;
7113 if ((long) value > 0x7f || (long) value < -0x80)
7114 return bfd_reloc_overflow;
7116 bfd_put_8 (input_bfd, value, hit_data);
7117 return bfd_reloc_ok;
7122 if ((long) value > 0x7fff || (long) value < -0x8000)
7123 return bfd_reloc_overflow;
7125 bfd_put_16 (input_bfd, value, hit_data);
7126 return bfd_reloc_ok;
7128 case R_ARM_THM_ABS5:
7129 /* Support ldr and str instructions for the thumb. */
7130 if (globals->use_rel)
7132 /* Need to refetch addend. */
7133 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7134 /* ??? Need to determine shift amount from operand size. */
7135 addend >>= howto->rightshift;
7139 /* ??? Isn't value unsigned? */
7140 if ((long) value > 0x1f || (long) value < -0x10)
7141 return bfd_reloc_overflow;
7143 /* ??? Value needs to be properly shifted into place first. */
7144 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7145 bfd_put_16 (input_bfd, value, hit_data);
7146 return bfd_reloc_ok;
7148 case R_ARM_THM_ALU_PREL_11_0:
7149 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7152 bfd_signed_vma relocation;
7154 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7155 | bfd_get_16 (input_bfd, hit_data + 2);
7157 if (globals->use_rel)
7159 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7160 | ((insn & (1 << 26)) >> 15);
7161 if (insn & 0xf00000)
7162 signed_addend = -signed_addend;
7165 relocation = value + signed_addend;
7166 relocation -= (input_section->output_section->vma
7167 + input_section->output_offset
7170 value = abs (relocation);
7172 if (value >= 0x1000)
7173 return bfd_reloc_overflow;
7175 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7176 | ((value & 0x700) << 4)
7177 | ((value & 0x800) << 15);
7181 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7182 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7184 return bfd_reloc_ok;
7188 /* PR 10073: This reloc is not generated by the GNU toolchain,
7189 but it is supported for compatibility with third party libraries
7190 generated by other compilers, specifically the ARM/IAR compiler.
7193 bfd_signed_vma relocation;
7195 insn = bfd_get_16 (input_bfd, hit_data);
7197 if (globals->use_rel)
7198 addend = (insn & 0x00ff) << 2;
7200 relocation = value + addend;
7201 relocation -= (input_section->output_section->vma
7202 + input_section->output_offset
7205 value = abs (relocation);
7207 /* We do not check for overflow of this reloc. Although strictly
7208 speaking this is incorrect, it appears to be necessary in order
7209 to work with IAR generated relocs. Since GCC and GAS do not
7210 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7211 a problem for them. */
7214 insn = (insn & 0xff00) | (value >> 2);
7216 bfd_put_16 (input_bfd, insn, hit_data);
7218 return bfd_reloc_ok;
7221 case R_ARM_THM_PC12:
7222 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7225 bfd_signed_vma relocation;
7227 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7228 | bfd_get_16 (input_bfd, hit_data + 2);
7230 if (globals->use_rel)
7232 signed_addend = insn & 0xfff;
7233 if (!(insn & (1 << 23)))
7234 signed_addend = -signed_addend;
7237 relocation = value + signed_addend;
7238 relocation -= (input_section->output_section->vma
7239 + input_section->output_offset
7242 value = abs (relocation);
7244 if (value >= 0x1000)
7245 return bfd_reloc_overflow;
7247 insn = (insn & 0xff7ff000) | value;
7248 if (relocation >= 0)
7251 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7252 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7254 return bfd_reloc_ok;
7257 case R_ARM_THM_XPC22:
7258 case R_ARM_THM_CALL:
7259 case R_ARM_THM_JUMP24:
7260 /* Thumb BL (branch long instruction). */
7264 bfd_boolean overflow = FALSE;
7265 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7266 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7267 bfd_signed_vma reloc_signed_max;
7268 bfd_signed_vma reloc_signed_min;
7270 bfd_signed_vma signed_check;
7272 int thumb2 = using_thumb2 (globals);
7274 /* A branch to an undefined weak symbol is turned into a jump to
7275 the next instruction unless a PLT entry will be created. */
7276 if (h && h->root.type == bfd_link_hash_undefweak
7277 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7279 bfd_put_16 (input_bfd, 0xe000, hit_data);
7280 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7281 return bfd_reloc_ok;
7284 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7285 with Thumb-1) involving the J1 and J2 bits. */
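	/* The branch offset is S:I1:I2:imm10:imm11:0, with I1 = NOT(J1 XOR S)
	   and I2 = NOT(J2 XOR S), giving a range of roughly +/-16MB; in the
	   original Thumb-1 encoding J1 and J2 are both 1, so I1 and I2 simply
	   repeat the sign bit and the range is +/-4MB.  */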
7286 if (globals->use_rel)
7288 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7289 bfd_vma upper = upper_insn & 0x3ff;
7290 bfd_vma lower = lower_insn & 0x7ff;
7291 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7292 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7293 bfd_vma i1 = j1 ^ s ? 0 : 1;
7294 bfd_vma i2 = j2 ^ s ? 0 : 1;
7296 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7298 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7300 signed_addend = addend;
7303 if (r_type == R_ARM_THM_XPC22)
7305 /* Check for Thumb to Thumb call. */
7306 /* FIXME: Should we translate the instruction into a BL
7307 instruction instead ? */
7308 if (sym_flags == STT_ARM_TFUNC)
7309 (*_bfd_error_handler)
7310 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7312 h ? h->root.root.string : "(local)");
7316 /* If it is not a call to Thumb, assume call to Arm.
7317 If it is a call relative to a section name, then it is not a
7318 function call at all, but rather a long jump. Calls through
7319 the PLT do not require stubs. */
7320 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7321 && (h == NULL || splt == NULL
7322 || h->plt.offset == (bfd_vma) -1))
7324 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7326 /* Convert BL to BLX. */
7327 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7329 else if (( r_type != R_ARM_THM_CALL)
7330 && (r_type != R_ARM_THM_JUMP24))
7332 if (elf32_thumb_to_arm_stub
7333 (info, sym_name, input_bfd, output_bfd, input_section,
7334 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7336 return bfd_reloc_ok;
7338 return bfd_reloc_dangerous;
7341 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7342 && r_type == R_ARM_THM_CALL)
7344 /* Make sure this is a BL. */
7345 lower_insn |= 0x1800;
7349 /* Handle calls via the PLT. */
7350 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7352 value = (splt->output_section->vma
7353 + splt->output_offset
7355 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7357 /* If the Thumb BLX instruction is available, convert the
7358 BL to a BLX instruction to call the ARM-mode PLT entry. */
7359 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7362 /* Target the Thumb stub before the ARM PLT entry. */
7363 value -= PLT_THUMB_STUB_SIZE;
7364 *unresolved_reloc_p = FALSE;
7367 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7369 /* Check if a stub has to be inserted because the destination is too far away. */
7372 bfd_signed_vma branch_offset;
7373 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7375 from = (input_section->output_section->vma
7376 + input_section->output_offset
7378 branch_offset = (bfd_signed_vma)(value - from);
7381 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7382 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7385 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7386 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7387 || ((sym_flags != STT_ARM_TFUNC)
7388 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7389 || r_type == R_ARM_THM_JUMP24)))
7391 /* The target is out of reach or we are changing modes, so
7392 redirect the branch to the local stub for this function. */
7394 stub_entry = elf32_arm_get_stub_entry (input_section,
7397 if (stub_entry != NULL)
7398 value = (stub_entry->stub_offset
7399 + stub_entry->stub_sec->output_offset
7400 + stub_entry->stub_sec->output_section->vma);
7402 /* If this call becomes a call to Arm, force BLX. */
7403 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7406 && !arm_stub_is_thumb (stub_entry->stub_type))
7407 || (sym_flags != STT_ARM_TFUNC))
7408 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7413 relocation = value + signed_addend;
7415 relocation -= (input_section->output_section->vma
7416 + input_section->output_offset
7419 check = relocation >> howto->rightshift;
7421 /* If this is a signed value, the rightshift just dropped
7422 leading 1 bits (assuming twos complement). */
7423 if ((bfd_signed_vma) relocation >= 0)
7424 signed_check = check;
7426 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7428 /* Calculate the permissible maximum and minimum values for
7429 this relocation according to whether we're relocating for a Thumb-2 or Thumb-1 branch. */
7431 bitsize = howto->bitsize;
7434 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7435 reloc_signed_min = ~reloc_signed_max;
7437 /* Assumes two's complement. */
7438 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7441 if ((lower_insn & 0x5000) == 0x4000)
7442 /* For a BLX instruction, make sure that the relocation is rounded up
7443 to a word boundary. This follows the semantics of the instruction
7444 which specifies that bit 1 of the target address will come from bit
7445 1 of the base address. */
7446 relocation = (relocation + 2) & ~ 3;
7448 /* Put RELOCATION back into the insn. Assumes two's complement.
7449 We use the Thumb-2 encoding, which is safe even if dealing with
7450 a Thumb-1 instruction by virtue of our overflow check above. */
7451 reloc_sign = (signed_check < 0) ? 1 : 0;
7452 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7453 | ((relocation >> 12) & 0x3ff)
7454 | (reloc_sign << 10);
7455 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7456 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7457 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7458 | ((relocation >> 1) & 0x7ff);
7460 /* Put the relocated value back in the object file: */
7461 bfd_put_16 (input_bfd, upper_insn, hit_data);
7462 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7464 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7468 case R_ARM_THM_JUMP19:
7469 /* Thumb32 conditional branch instruction. */
7472 bfd_boolean overflow = FALSE;
7473 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7474 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7475 bfd_signed_vma reloc_signed_max = 0xffffe;
7476 bfd_signed_vma reloc_signed_min = -0x100000;
7477 bfd_signed_vma signed_check;
7479 /* Need to refetch the addend, reconstruct the top three bits,
7480 and squish the two 11 bit pieces together. */
7481 if (globals->use_rel)
7483 bfd_vma S = (upper_insn & 0x0400) >> 10;
7484 bfd_vma upper = (upper_insn & 0x003f);
7485 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7486 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7487 bfd_vma lower = (lower_insn & 0x07ff);
7492 upper -= 0x0100; /* Sign extend. */
7494 addend = (upper << 12) | (lower << 1);
7495 signed_addend = addend;
7498 /* Handle calls via the PLT. */
7499 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7501 value = (splt->output_section->vma
7502 + splt->output_offset
7504 /* Target the Thumb stub before the ARM PLT entry. */
7505 value -= PLT_THUMB_STUB_SIZE;
7506 *unresolved_reloc_p = FALSE;
7509 /* ??? Should handle interworking? GCC might someday try to
7510 use this for tail calls. */
7512 relocation = value + signed_addend;
7513 relocation -= (input_section->output_section->vma
7514 + input_section->output_offset
7516 signed_check = (bfd_signed_vma) relocation;
7518 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7521 /* Put RELOCATION back into the insn. */
7523 bfd_vma S = (relocation & 0x00100000) >> 20;
7524 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7525 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7526 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7527 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7529 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7530 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7533 /* Put the relocated value back in the object file: */
7534 bfd_put_16 (input_bfd, upper_insn, hit_data);
7535 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7537 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7540 case R_ARM_THM_JUMP11:
7541 case R_ARM_THM_JUMP8:
7542 case R_ARM_THM_JUMP6:
7543 /* Thumb B (branch) instruction. */
7545 bfd_signed_vma relocation;
7546 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7547 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7548 bfd_signed_vma signed_check;
7550 /* CBZ/CBNZ cannot jump backward. */
7551 if (r_type == R_ARM_THM_JUMP6)
7552 reloc_signed_min = 0;
7554 if (globals->use_rel)
7556 /* Need to refetch addend. */
7557 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7558 if (addend & ((howto->src_mask + 1) >> 1))
7561 signed_addend &= ~ howto->src_mask;
7562 signed_addend |= addend;
7565 signed_addend = addend;
7566 /* The value in the insn has been right shifted. We need to
7567 undo this, so that we can perform the address calculation
7568 in terms of bytes. */
7569 signed_addend <<= howto->rightshift;
7571 relocation = value + signed_addend;
7573 relocation -= (input_section->output_section->vma
7574 + input_section->output_offset
7577 relocation >>= howto->rightshift;
7578 signed_check = relocation;
7580 if (r_type == R_ARM_THM_JUMP6)
7581 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7583 relocation &= howto->dst_mask;
7584 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7586 bfd_put_16 (input_bfd, relocation, hit_data);
7588 /* Assumes two's complement. */
7589 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7590 return bfd_reloc_overflow;
7592 return bfd_reloc_ok;
7595 case R_ARM_ALU_PCREL7_0:
7596 case R_ARM_ALU_PCREL15_8:
7597 case R_ARM_ALU_PCREL23_15:
7602 insn = bfd_get_32 (input_bfd, hit_data);
7603 if (globals->use_rel)
7605 /* Extract the addend. */
7606 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7607 signed_addend = addend;
7609 relocation = value + signed_addend;
7611 relocation -= (input_section->output_section->vma
7612 + input_section->output_offset
7614 insn = (insn & ~0xfff)
7615 | ((howto->bitpos << 7) & 0xf00)
7616 | ((relocation >> howto->bitpos) & 0xff);
7617 bfd_put_32 (input_bfd, insn, hit_data);
7619 return bfd_reloc_ok;
7621 case R_ARM_GNU_VTINHERIT:
7622 case R_ARM_GNU_VTENTRY:
7623 return bfd_reloc_ok;
7625 case R_ARM_GOTOFF32:
7626 /* Relocation is relative to the start of the
7627 global offset table. */
7629 BFD_ASSERT (sgot != NULL);
7631 return bfd_reloc_notsupported;
7633 /* If we are addressing a Thumb function, we need to adjust the
7634 address by one, so that attempts to call the function pointer will
7635 correctly interpret it as Thumb code. */
7636 if (sym_flags == STT_ARM_TFUNC)
7639 /* Note that sgot->output_offset is not involved in this
7640 calculation. We always want the start of .got. If we
7641 define _GLOBAL_OFFSET_TABLE in a different way, as is
7642 permitted by the ABI, we might have to change this
7644 value -= sgot->output_section->vma;
7645 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7646 contents, rel->r_offset, value,
7650 /* Use global offset table as symbol value. */
7651 BFD_ASSERT (sgot != NULL);
7654 return bfd_reloc_notsupported;
7656 *unresolved_reloc_p = FALSE;
7657 value = sgot->output_section->vma;
7658 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7659 contents, rel->r_offset, value,
7663 case R_ARM_GOT_PREL:
7664 /* Relocation is to the entry for this symbol in the
7665 global offset table. */
7667 return bfd_reloc_notsupported;
7674 off = h->got.offset;
7675 BFD_ASSERT (off != (bfd_vma) -1);
7676 dyn = globals->root.dynamic_sections_created;
7678 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7680 && SYMBOL_REFERENCES_LOCAL (info, h))
7681 || (ELF_ST_VISIBILITY (h->other)
7682 && h->root.type == bfd_link_hash_undefweak))
7684 /* This is actually a static link, or it is a -Bsymbolic link
7685 and the symbol is defined locally. We must initialize this
7686 entry in the global offset table. Since the offset must
7687 always be a multiple of 4, we use the least significant bit
7688 to record whether we have initialized it already.
7690 When doing a dynamic link, we create a .rel(a).got relocation
7691 entry to initialize the value. This is done in the
7692 finish_dynamic_symbol routine. */
7697 /* If we are addressing a Thumb function, we need to
7698 adjust the address by one, so that attempts to
7699 call the function pointer will correctly
7700 interpret it as Thumb code. */
7701 if (sym_flags == STT_ARM_TFUNC)
7704 bfd_put_32 (output_bfd, value, sgot->contents + off);
7709 *unresolved_reloc_p = FALSE;
7711 value = sgot->output_offset + off;
7717 BFD_ASSERT (local_got_offsets != NULL &&
7718 local_got_offsets[r_symndx] != (bfd_vma) -1);
7720 off = local_got_offsets[r_symndx];
7722 /* The offset must always be a multiple of 4. We use the
7723 least significant bit to record whether we have already
7724 generated the necessary reloc. */
7729 /* If we are addressing a Thumb function, we need to
7730 adjust the address by one, so that attempts to
7731 call the function pointer will correctly
7732 interpret it as Thumb code. */
7733 if (sym_flags == STT_ARM_TFUNC)
7736 if (globals->use_rel)
7737 bfd_put_32 (output_bfd, value, sgot->contents + off);
7742 Elf_Internal_Rela outrel;
7745 srelgot = (bfd_get_section_by_name
7746 (dynobj, RELOC_SECTION (globals, ".got")));
7747 BFD_ASSERT (srelgot != NULL);
7749 outrel.r_addend = addend + value;
7750 outrel.r_offset = (sgot->output_section->vma
7751 + sgot->output_offset
7753 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7754 loc = srelgot->contents;
7755 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7756 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7759 local_got_offsets[r_symndx] |= 1;
7762 value = sgot->output_offset + off;
7764 if (r_type != R_ARM_GOT32)
7765 value += sgot->output_section->vma;
7767 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7768 contents, rel->r_offset, value,
7771 case R_ARM_TLS_LDO32:
7772 value = value - dtpoff_base (info);
7774 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7775 contents, rel->r_offset, value,
7778 case R_ARM_TLS_LDM32:
7782 if (globals->sgot == NULL)
7785 off = globals->tls_ldm_got.offset;
7791 /* If we don't know the module number, create a relocation for it. */
7795 Elf_Internal_Rela outrel;
7798 if (globals->srelgot == NULL)
7801 outrel.r_addend = 0;
7802 outrel.r_offset = (globals->sgot->output_section->vma
7803 + globals->sgot->output_offset + off);
7804 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7806 if (globals->use_rel)
7807 bfd_put_32 (output_bfd, outrel.r_addend,
7808 globals->sgot->contents + off);
7810 loc = globals->srelgot->contents;
7811 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7812 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7815 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7817 globals->tls_ldm_got.offset |= 1;
7820 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7821 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7823 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7824 contents, rel->r_offset, value,
7828 case R_ARM_TLS_GD32:
7829 case R_ARM_TLS_IE32:
7835 if (globals->sgot == NULL)
7842 dyn = globals->root.dynamic_sections_created;
7843 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7845 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7847 *unresolved_reloc_p = FALSE;
7850 off = h->got.offset;
7851 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7855 if (local_got_offsets == NULL)
7857 off = local_got_offsets[r_symndx];
7858 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7861 if (tls_type == GOT_UNKNOWN)
7868 bfd_boolean need_relocs = FALSE;
7869 Elf_Internal_Rela outrel;
7870 bfd_byte *loc = NULL;
7873 /* The GOT entries have not been initialized yet. Do it
7874 now, and emit any relocations. If both an IE GOT and a
7875 GD GOT are necessary, we emit the GD first. */
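	  /* A GD reference owns two consecutive GOT words: the module id,
	     resolved via R_ARM_TLS_DTPMOD32, followed by the offset within
	     that module's TLS block, via R_ARM_TLS_DTPOFF32.  An IE reference
	     owns a single word holding the TP-relative offset, via
	     R_ARM_TLS_TPOFF32; when a symbol needs both, the IE word follows
	     the GD pair.  */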
7877 if ((info->shared || indx != 0)
7879 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7880 || h->root.type != bfd_link_hash_undefweak))
7883 if (globals->srelgot == NULL)
7885 loc = globals->srelgot->contents;
7886 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7889 if (tls_type & GOT_TLS_GD)
7893 outrel.r_addend = 0;
7894 outrel.r_offset = (globals->sgot->output_section->vma
7895 + globals->sgot->output_offset
7897 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
7899 if (globals->use_rel)
7900 bfd_put_32 (output_bfd, outrel.r_addend,
7901 globals->sgot->contents + cur_off);
7903 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7904 globals->srelgot->reloc_count++;
7905 loc += RELOC_SIZE (globals);
7908 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7909 globals->sgot->contents + cur_off + 4);
7912 outrel.r_addend = 0;
7913 outrel.r_info = ELF32_R_INFO (indx,
7914 R_ARM_TLS_DTPOFF32);
7915 outrel.r_offset += 4;
7917 if (globals->use_rel)
7918 bfd_put_32 (output_bfd, outrel.r_addend,
7919 globals->sgot->contents + cur_off + 4);
7922 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7923 globals->srelgot->reloc_count++;
7924 loc += RELOC_SIZE (globals);
7929 /* If we are not emitting relocations for a
7930 general dynamic reference, then we must be in a
7931 static link or an executable link with the
7932 symbol binding locally. Mark it as belonging
7933 to module 1, the executable. */
7934 bfd_put_32 (output_bfd, 1,
7935 globals->sgot->contents + cur_off);
7936 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7937 globals->sgot->contents + cur_off + 4);
7943 if (tls_type & GOT_TLS_IE)
7948 outrel.r_addend = value - dtpoff_base (info);
7950 outrel.r_addend = 0;
7951 outrel.r_offset = (globals->sgot->output_section->vma
7952 + globals->sgot->output_offset
7954 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
7956 if (globals->use_rel)
7957 bfd_put_32 (output_bfd, outrel.r_addend,
7958 globals->sgot->contents + cur_off);
7960 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7961 globals->srelgot->reloc_count++;
7962 loc += RELOC_SIZE (globals);
7965 bfd_put_32 (output_bfd, tpoff (info, value),
7966 globals->sgot->contents + cur_off);
7973 local_got_offsets[r_symndx] |= 1;
7976 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
7978 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7979 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7981 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7982 contents, rel->r_offset, value,
7986 case R_ARM_TLS_LE32:
7989 (*_bfd_error_handler)
7990 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
7991 input_bfd, input_section,
7992 (long) rel->r_offset, howto->name);
7996 value = tpoff (info, value);
7998 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7999 contents, rel->r_offset, value,
8003 if (globals->fix_v4bx)
8005 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8007 /* Ensure that we have a BX instruction. */
8008 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
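	  /* With fix_v4bx == 1 the BX Rm is simply rewritten as MOV PC, Rm
	     below; with fix_v4bx == 2 (and Rm not the PC) it becomes a branch
	     to a per-register veneer instead.  The subtraction of 8 allows
	     for the PC reading as the address of the branch plus 8.  */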
8010 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8012 /* Branch to veneer. */
8014 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8015 glue_addr -= input_section->output_section->vma
8016 + input_section->output_offset
8017 + rel->r_offset + 8;
8018 insn = (insn & 0xf0000000) | 0x0a000000
8019 | ((glue_addr >> 2) & 0x00ffffff);
8023 /* Preserve Rm (lowest four bits) and the condition code
8024 (highest four bits). Other bits encode MOV PC,Rm. */
8025 insn = (insn & 0xf000000f) | 0x01a0f000;
8028 bfd_put_32 (input_bfd, insn, hit_data);
8030 return bfd_reloc_ok;
8032 case R_ARM_MOVW_ABS_NC:
8033 case R_ARM_MOVT_ABS:
8034 case R_ARM_MOVW_PREL_NC:
8035 case R_ARM_MOVT_PREL:
8036 /* Until we properly support segment-base-relative addressing then
8037 we assume the segment base to be zero, as for the group relocations.
8038 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8039 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8040 case R_ARM_MOVW_BREL_NC:
8041 case R_ARM_MOVW_BREL:
8042 case R_ARM_MOVT_BREL:
8044 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
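	/* MOVW/MOVT encode their 16-bit immediate as imm4:imm12, with imm4
	   in bits 19-16 and imm12 in bits 11-0 of the instruction; the
	   (addend ^ 0x8000) - 0x8000 below sign-extends the 16-bit REL
	   addend.  */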
8046 if (globals->use_rel)
8048 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8049 signed_addend = (addend ^ 0x8000) - 0x8000;
8052 value += signed_addend;
8054 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8055 value -= (input_section->output_section->vma
8056 + input_section->output_offset + rel->r_offset);
8058 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8059 return bfd_reloc_overflow;
8061 if (sym_flags == STT_ARM_TFUNC)
8064 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8065 || r_type == R_ARM_MOVT_BREL)
8069 insn |= value & 0xfff;
8070 insn |= (value & 0xf000) << 4;
8071 bfd_put_32 (input_bfd, insn, hit_data);
8073 return bfd_reloc_ok;
8075 case R_ARM_THM_MOVW_ABS_NC:
8076 case R_ARM_THM_MOVT_ABS:
8077 case R_ARM_THM_MOVW_PREL_NC:
8078 case R_ARM_THM_MOVT_PREL:
8079 /* Until we properly support segment-base-relative addressing then
8080 we assume the segment base to be zero, as for the above relocations.
8081 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8082 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8083 as R_ARM_THM_MOVT_ABS. */
8084 case R_ARM_THM_MOVW_BREL_NC:
8085 case R_ARM_THM_MOVW_BREL:
8086 case R_ARM_THM_MOVT_BREL:
8090 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8091 insn |= bfd_get_16 (input_bfd, hit_data + 2);
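	/* The Thumb encoding splits the 16-bit immediate as imm4:i:imm3:imm8,
	   with imm4 in bits 19-16, i in bit 26, imm3 in bits 14-12 and imm8
	   in bits 7-0 of the combined 32-bit instruction word, hence the
	   shifts used to extract and reinsert it.  */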
8093 if (globals->use_rel)
8095 addend = ((insn >> 4) & 0xf000)
8096 | ((insn >> 15) & 0x0800)
8097 | ((insn >> 4) & 0x0700)
8099 signed_addend = (addend ^ 0x8000) - 0x8000;
8102 value += signed_addend;
8104 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8105 value -= (input_section->output_section->vma
8106 + input_section->output_offset + rel->r_offset);
8108 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8109 return bfd_reloc_overflow;
8111 if (sym_flags == STT_ARM_TFUNC)
8114 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8115 || r_type == R_ARM_THM_MOVT_BREL)
8119 insn |= (value & 0xf000) << 4;
8120 insn |= (value & 0x0800) << 15;
8121 insn |= (value & 0x0700) << 4;
8122 insn |= (value & 0x00ff);
8124 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8125 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8127 return bfd_reloc_ok;
8129 case R_ARM_ALU_PC_G0_NC:
8130 case R_ARM_ALU_PC_G1_NC:
8131 case R_ARM_ALU_PC_G0:
8132 case R_ARM_ALU_PC_G1:
8133 case R_ARM_ALU_PC_G2:
8134 case R_ARM_ALU_SB_G0_NC:
8135 case R_ARM_ALU_SB_G1_NC:
8136 case R_ARM_ALU_SB_G0:
8137 case R_ARM_ALU_SB_G1:
8138 case R_ARM_ALU_SB_G2:
8140 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8141 bfd_vma pc = input_section->output_section->vma
8142 + input_section->output_offset + rel->r_offset;
8143 /* sb should be the origin of the *segment* containing the symbol.
8144 It is not clear how to obtain this OS-dependent value, so we
8145 make an arbitrary choice of zero. */
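	/* These relocations implement the group-relocation scheme from the
	   ARM ELF ABI: roughly, a sequence such as ADD/ADD/LDR in which each
	   ALU instruction adds one 8-bit-with-rotation chunk (G_0, G_1, ...)
	   of the offset and the final memory access takes the remaining
	   residual.  For the checked (non-_NC) ALU variants the residual left
	   over after the selected group must be zero, which is what the
	   overflow test below enforces.  */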
8149 bfd_signed_vma signed_value;
8152 /* Determine which group of bits to select. */
8155 case R_ARM_ALU_PC_G0_NC:
8156 case R_ARM_ALU_PC_G0:
8157 case R_ARM_ALU_SB_G0_NC:
8158 case R_ARM_ALU_SB_G0:
8162 case R_ARM_ALU_PC_G1_NC:
8163 case R_ARM_ALU_PC_G1:
8164 case R_ARM_ALU_SB_G1_NC:
8165 case R_ARM_ALU_SB_G1:
8169 case R_ARM_ALU_PC_G2:
8170 case R_ARM_ALU_SB_G2:
8178 /* If REL, extract the addend from the insn. If RELA, it will
8179 have already been fetched for us. */
8180 if (globals->use_rel)
8183 bfd_vma constant = insn & 0xff;
8184 bfd_vma rotation = (insn & 0xf00) >> 8;
8187 signed_addend = constant;
8190 /* Compensate for the fact that in the instruction, the
8191 rotation is stored in multiples of 2 bits. */
8194 /* Rotate "constant" right by "rotation" bits. */
8195 signed_addend = (constant >> rotation) |
8196 (constant << (8 * sizeof (bfd_vma) - rotation));
8199 /* Determine if the instruction is an ADD or a SUB.
8200 (For REL, this determines the sign of the addend.) */
8201 negative = identify_add_or_sub (insn);
8204 (*_bfd_error_handler)
8205 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8206 input_bfd, input_section,
8207 (long) rel->r_offset, howto->name);
8208 return bfd_reloc_overflow;
8211 signed_addend *= negative;
8214 /* Compute the value (X) to go in the place. */
8215 if (r_type == R_ARM_ALU_PC_G0_NC
8216 || r_type == R_ARM_ALU_PC_G1_NC
8217 || r_type == R_ARM_ALU_PC_G0
8218 || r_type == R_ARM_ALU_PC_G1
8219 || r_type == R_ARM_ALU_PC_G2)
8221 signed_value = value - pc + signed_addend;
8223 /* Section base relative. */
8224 signed_value = value - sb + signed_addend;
8226 /* If the target symbol is a Thumb function, then set the
8227 Thumb bit in the address. */
8228 if (sym_flags == STT_ARM_TFUNC)
8231 /* Calculate the value of the relevant G_n, in encoded
8232 constant-with-rotation format. */
8233 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8236 /* Check for overflow if required. */
8237 if ((r_type == R_ARM_ALU_PC_G0
8238 || r_type == R_ARM_ALU_PC_G1
8239 || r_type == R_ARM_ALU_PC_G2
8240 || r_type == R_ARM_ALU_SB_G0
8241 || r_type == R_ARM_ALU_SB_G1
8242 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8244 (*_bfd_error_handler)
8245 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8246 input_bfd, input_section,
8247 (long) rel->r_offset, abs (signed_value), howto->name);
8248 return bfd_reloc_overflow;
8251 /* Mask out the value and the ADD/SUB part of the opcode; take care
8252 not to destroy the S bit. */
8255 /* Set the opcode according to whether the value to go in the
8256 place is negative. */
8257 if (signed_value < 0)
8262 /* Encode the offset. */
8265 bfd_put_32 (input_bfd, insn, hit_data);
8267 return bfd_reloc_ok;
8269 case R_ARM_LDR_PC_G0:
8270 case R_ARM_LDR_PC_G1:
8271 case R_ARM_LDR_PC_G2:
8272 case R_ARM_LDR_SB_G0:
8273 case R_ARM_LDR_SB_G1:
8274 case R_ARM_LDR_SB_G2:
8276 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8277 bfd_vma pc = input_section->output_section->vma
8278 + input_section->output_offset + rel->r_offset;
8279 bfd_vma sb = 0; /* See note above. */
8281 bfd_signed_vma signed_value;
8284 /* Determine which groups of bits to calculate. */
8287 case R_ARM_LDR_PC_G0:
8288 case R_ARM_LDR_SB_G0:
8292 case R_ARM_LDR_PC_G1:
8293 case R_ARM_LDR_SB_G1:
8297 case R_ARM_LDR_PC_G2:
8298 case R_ARM_LDR_SB_G2:
8306 /* If REL, extract the addend from the insn. If RELA, it will
8307 have already been fetched for us. */
8308 if (globals->use_rel)
8310 int negative = (insn & (1 << 23)) ? 1 : -1;
8311 signed_addend = negative * (insn & 0xfff);
8314 /* Compute the value (X) to go in the place. */
8315 if (r_type == R_ARM_LDR_PC_G0
8316 || r_type == R_ARM_LDR_PC_G1
8317 || r_type == R_ARM_LDR_PC_G2)
8319 signed_value = value - pc + signed_addend;
8321 /* Section base relative. */
8322 signed_value = value - sb + signed_addend;
8324 /* Calculate the value of the relevant G_{n-1} to obtain
8325 the residual at that stage. */
8326 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8328 /* Check for overflow. */
8329 if (residual >= 0x1000)
8331 (*_bfd_error_handler)
8332 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8333 input_bfd, input_section,
8334 (long) rel->r_offset, abs (signed_value), howto->name);
8335 return bfd_reloc_overflow;
8338 /* Mask out the value and U bit. */
8341 /* Set the U bit if the value to go in the place is non-negative. */
8342 if (signed_value >= 0)
8345 /* Encode the offset. */
8348 bfd_put_32 (input_bfd, insn, hit_data);
8350 return bfd_reloc_ok;
8352 case R_ARM_LDRS_PC_G0:
8353 case R_ARM_LDRS_PC_G1:
8354 case R_ARM_LDRS_PC_G2:
8355 case R_ARM_LDRS_SB_G0:
8356 case R_ARM_LDRS_SB_G1:
8357 case R_ARM_LDRS_SB_G2:
8359 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8360 bfd_vma pc = input_section->output_section->vma
8361 + input_section->output_offset + rel->r_offset;
8362 bfd_vma sb = 0; /* See note above. */
8364 bfd_signed_vma signed_value;
8367 /* Determine which groups of bits to calculate. */
8370 case R_ARM_LDRS_PC_G0:
8371 case R_ARM_LDRS_SB_G0:
8375 case R_ARM_LDRS_PC_G1:
8376 case R_ARM_LDRS_SB_G1:
8380 case R_ARM_LDRS_PC_G2:
8381 case R_ARM_LDRS_SB_G2:
8389 /* If REL, extract the addend from the insn. If RELA, it will
8390 have already been fetched for us. */
8391 if (globals->use_rel)
8393 int negative = (insn & (1 << 23)) ? 1 : -1;
8394 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8397 /* Compute the value (X) to go in the place. */
8398 if (r_type == R_ARM_LDRS_PC_G0
8399 || r_type == R_ARM_LDRS_PC_G1
8400 || r_type == R_ARM_LDRS_PC_G2)
8402 signed_value = value - pc + signed_addend;
8404 /* Section base relative. */
8405 signed_value = value - sb + signed_addend;
8407 /* Calculate the value of the relevant G_{n-1} to obtain
8408 the residual at that stage. */
8409 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8411 /* Check for overflow. */
8412 if (residual >= 0x100)
8414 (*_bfd_error_handler)
8415 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8416 input_bfd, input_section,
8417 (long) rel->r_offset, abs (signed_value), howto->name);
8418 return bfd_reloc_overflow;
8421 /* Mask out the value and U bit. */
8424 /* Set the U bit if the value to go in the place is non-negative. */
8425 if (signed_value >= 0)
8428 /* Encode the offset. */
8429 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8431 bfd_put_32 (input_bfd, insn, hit_data);
8433 return bfd_reloc_ok;
8435 case R_ARM_LDC_PC_G0:
8436 case R_ARM_LDC_PC_G1:
8437 case R_ARM_LDC_PC_G2:
8438 case R_ARM_LDC_SB_G0:
8439 case R_ARM_LDC_SB_G1:
8440 case R_ARM_LDC_SB_G2:
8442 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8443 bfd_vma pc = input_section->output_section->vma
8444 + input_section->output_offset + rel->r_offset;
8445 bfd_vma sb = 0; /* See note above. */
8447 bfd_signed_vma signed_value;
8450 /* Determine which groups of bits to calculate. */
8453 case R_ARM_LDC_PC_G0:
8454 case R_ARM_LDC_SB_G0:
8458 case R_ARM_LDC_PC_G1:
8459 case R_ARM_LDC_SB_G1:
8463 case R_ARM_LDC_PC_G2:
8464 case R_ARM_LDC_SB_G2:
8472 /* If REL, extract the addend from the insn. If RELA, it will
8473 have already been fetched for us. */
8474 if (globals->use_rel)
8476 int negative = (insn & (1 << 23)) ? 1 : -1;
8477 signed_addend = negative * ((insn & 0xff) << 2);
8480 /* Compute the value (X) to go in the place. */
8481 if (r_type == R_ARM_LDC_PC_G0
8482 || r_type == R_ARM_LDC_PC_G1
8483 || r_type == R_ARM_LDC_PC_G2)
8485 signed_value = value - pc + signed_addend;
8487 /* Section base relative. */
8488 signed_value = value - sb + signed_addend;
8490 /* Calculate the value of the relevant G_{n-1} to obtain
8491 the residual at that stage. */
8492 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8494 /* Check for overflow. (The absolute value to go in the place must be
8495 divisible by four and, after having been divided by four, must
8496 fit in eight bits.) */
8497 if ((residual & 0x3) != 0 || residual >= 0x400)
8499 (*_bfd_error_handler)
8500 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8501 input_bfd, input_section,
8502 (long) rel->r_offset, abs (signed_value), howto->name);
8503 return bfd_reloc_overflow;
8506 /* Mask out the value and U bit. */
8509 /* Set the U bit if the value to go in the place is non-negative. */
8510 if (signed_value >= 0)
8513 /* Encode the offset. */
8514 insn |= residual >> 2;
8516 bfd_put_32 (input_bfd, insn, hit_data);
8518 return bfd_reloc_ok;
8521 return bfd_reloc_notsupported;
8525 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
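/* For REL-style objects the addend lives in the instruction itself, so a
   relocatable link has to patch it in place: this helper re-extracts the
   signed addend, adds the increment and re-inserts it, with special
   handling for the split Thumb BL/BLX encoding.  */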
8527 arm_add_to_rel (bfd * abfd,
8529 reloc_howto_type * howto,
8530 bfd_signed_vma increment)
8532 bfd_signed_vma addend;
8534 if (howto->type == R_ARM_THM_CALL
8535 || howto->type == R_ARM_THM_JUMP24)
8537 int upper_insn, lower_insn;
8540 upper_insn = bfd_get_16 (abfd, address);
8541 lower_insn = bfd_get_16 (abfd, address + 2);
8542 upper = upper_insn & 0x7ff;
8543 lower = lower_insn & 0x7ff;
8545 addend = (upper << 12) | (lower << 1);
8546 addend += increment;
8549 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8550 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8552 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8553 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8559 contents = bfd_get_32 (abfd, address);
8561 /* Get the (signed) value from the instruction. */
8562 addend = contents & howto->src_mask;
8563 if (addend & ((howto->src_mask + 1) >> 1))
8565 bfd_signed_vma mask;
8568 mask &= ~ howto->src_mask;
8572 /* Add in the increment (which is a byte value). */
8573 switch (howto->type)
8576 addend += increment;
8583 addend <<= howto->size;
8584 addend += increment;
8586 /* Should we check for overflow here ? */
8588 /* Drop any undesired bits. */
8589 addend >>= howto->rightshift;
8593 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8595 bfd_put_32 (abfd, contents, address);
8599 #define IS_ARM_TLS_RELOC(R_TYPE) \
8600 ((R_TYPE) == R_ARM_TLS_GD32 \
8601 || (R_TYPE) == R_ARM_TLS_LDO32 \
8602 || (R_TYPE) == R_ARM_TLS_LDM32 \
8603 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8604 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8605 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8606 || (R_TYPE) == R_ARM_TLS_LE32 \
8607 || (R_TYPE) == R_ARM_TLS_IE32)
8609 /* Relocate an ARM ELF section. */
8612 elf32_arm_relocate_section (bfd * output_bfd,
8613 struct bfd_link_info * info,
8615 asection * input_section,
8616 bfd_byte * contents,
8617 Elf_Internal_Rela * relocs,
8618 Elf_Internal_Sym * local_syms,
8619 asection ** local_sections)
8621 Elf_Internal_Shdr *symtab_hdr;
8622 struct elf_link_hash_entry **sym_hashes;
8623 Elf_Internal_Rela *rel;
8624 Elf_Internal_Rela *relend;
8626 struct elf32_arm_link_hash_table * globals;
8628 globals = elf32_arm_hash_table (info);
8630 symtab_hdr = & elf_symtab_hdr (input_bfd);
8631 sym_hashes = elf_sym_hashes (input_bfd);
8634 relend = relocs + input_section->reloc_count;
8635 for (; rel < relend; rel++)
8638 reloc_howto_type * howto;
8639 unsigned long r_symndx;
8640 Elf_Internal_Sym * sym;
8642 struct elf_link_hash_entry * h;
8644 bfd_reloc_status_type r;
8647 bfd_boolean unresolved_reloc = FALSE;
8648 char *error_message = NULL;
8650 r_symndx = ELF32_R_SYM (rel->r_info);
8651 r_type = ELF32_R_TYPE (rel->r_info);
8652 r_type = arm_real_reloc_type (globals, r_type);
8654 if ( r_type == R_ARM_GNU_VTENTRY
8655 || r_type == R_ARM_GNU_VTINHERIT)
8658 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8659 howto = bfd_reloc.howto;
8665 if (r_symndx < symtab_hdr->sh_info)
8667 sym = local_syms + r_symndx;
8668 sym_type = ELF32_ST_TYPE (sym->st_info);
8669 sec = local_sections[r_symndx];
8670 if (globals->use_rel)
8672 relocation = (sec->output_section->vma
8673 + sec->output_offset
8675 if (!info->relocatable
8676 && (sec->flags & SEC_MERGE)
8677 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8680 bfd_vma addend, value;
8684 case R_ARM_MOVW_ABS_NC:
8685 case R_ARM_MOVT_ABS:
8686 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8687 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8688 addend = (addend ^ 0x8000) - 0x8000;
8691 case R_ARM_THM_MOVW_ABS_NC:
8692 case R_ARM_THM_MOVT_ABS:
8693 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8695 value |= bfd_get_16 (input_bfd,
8696 contents + rel->r_offset + 2);
8697 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8698 | ((value & 0x04000000) >> 15);
8699 addend = (addend ^ 0x8000) - 0x8000;
8703 if (howto->rightshift
8704 || (howto->src_mask & (howto->src_mask + 1)))
8706 (*_bfd_error_handler)
8707 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8708 input_bfd, input_section,
8709 (long) rel->r_offset, howto->name);
8713 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8715 /* Get the (signed) value from the instruction. */
8716 addend = value & howto->src_mask;
8717 if (addend & ((howto->src_mask + 1) >> 1))
8719 bfd_signed_vma mask;
8722 mask &= ~ howto->src_mask;
8730 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8732 addend += msec->output_section->vma + msec->output_offset;
8734 /* Cases here must match those in the preceding
8735 switch statement. */
8738 case R_ARM_MOVW_ABS_NC:
8739 case R_ARM_MOVT_ABS:
8740 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8742 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8745 case R_ARM_THM_MOVW_ABS_NC:
8746 case R_ARM_THM_MOVT_ABS:
8747 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8748 | (addend & 0xff) | ((addend & 0x0800) << 15);
8749 bfd_put_16 (input_bfd, value >> 16,
8750 contents + rel->r_offset);
8751 bfd_put_16 (input_bfd, value,
8752 contents + rel->r_offset + 2);
8756 value = (value & ~ howto->dst_mask)
8757 | (addend & howto->dst_mask);
8758 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8764 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8770 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8771 r_symndx, symtab_hdr, sym_hashes,
8773 unresolved_reloc, warned);
8778 if (sec != NULL && elf_discarded_section (sec))
8780 /* For relocs against symbols from removed linkonce sections,
8781 or sections discarded by a linker script, we just want the
8782 section contents zeroed. Avoid any special processing. */
8783 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8789 if (info->relocatable)
8791 /* This is a relocatable link. We don't have to change
8792 anything, unless the reloc is against a section symbol,
8793 in which case we have to adjust according to where the
8794 section symbol winds up in the output section. */
8795 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8797 if (globals->use_rel)
8798 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8799 howto, (bfd_signed_vma) sec->output_offset);
8801 rel->r_addend += sec->output_offset;
8807 name = h->root.root.string;
8810 name = (bfd_elf_string_from_elf_section
8811 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8812 if (name == NULL || *name == '\0')
8813 name = bfd_section_name (input_bfd, sec);
8817 && r_type != R_ARM_NONE
8819 || h->root.type == bfd_link_hash_defined
8820 || h->root.type == bfd_link_hash_defweak)
8821 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8823 (*_bfd_error_handler)
8824 ((sym_type == STT_TLS
8825 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8826 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8829 (long) rel->r_offset,
8834 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8835 input_section, contents, rel,
8836 relocation, info, sec, name,
8837 (h ? ELF_ST_TYPE (h->type) :
8838 ELF_ST_TYPE (sym->st_info)), h,
8839 &unresolved_reloc, &error_message);
8841 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8842 because such sections are not SEC_ALLOC and thus ld.so will
8843 not process them. */
8844 if (unresolved_reloc
8845 && !((input_section->flags & SEC_DEBUGGING) != 0
8848 (*_bfd_error_handler)
8849 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8852 (long) rel->r_offset,
8854 h->root.root.string);
8858 if (r != bfd_reloc_ok)
8862 case bfd_reloc_overflow:
8863 /* If the overflowing reloc was to an undefined symbol,
8864 we have already printed one error message and there
8865 is no point complaining again. */
8867 h->root.type != bfd_link_hash_undefined)
8868 && (!((*info->callbacks->reloc_overflow)
8869 (info, (h ? &h->root : NULL), name, howto->name,
8870 (bfd_vma) 0, input_bfd, input_section,
8875 case bfd_reloc_undefined:
8876 if (!((*info->callbacks->undefined_symbol)
8877 (info, name, input_bfd, input_section,
8878 rel->r_offset, TRUE)))
8882 case bfd_reloc_outofrange:
8883 error_message = _("out of range");
8886 case bfd_reloc_notsupported:
8887 error_message = _("unsupported relocation");
8890 case bfd_reloc_dangerous:
8891 /* error_message should already be set. */
8895 error_message = _("unknown error");
8899 BFD_ASSERT (error_message != NULL);
8900 if (!((*info->callbacks->reloc_dangerous)
8901 (info, error_message, input_bfd, input_section,
8912 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
8913 adds the edit to the start of the list. (The list must be built in order of
8914 ascending INDEX: the function's callers are primarily responsible for
8915 maintaining that condition). */
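/* Usage note (based on the callers below): DELETE_EXIDX_ENTRY edits pass
   the index of the .ARM.exidx entry being removed, while the trailing
   INSERT_EXIDX_CANTUNWIND_AT_END edit passes UINT_MAX so that it always
   sorts after any real entry.  */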
8918 add_unwind_table_edit (arm_unwind_table_edit **head,
8919 arm_unwind_table_edit **tail,
8920 arm_unwind_edit_type type,
8921 asection *linked_section,
8924 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
8926 new_edit->type = type;
8927 new_edit->linked_section = linked_section;
8928 new_edit->index = index;
8932 new_edit->next = NULL;
8935 (*tail)->next = new_edit;
8944 new_edit->next = *head;
8953 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
8955 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
8957 adjust_exidx_size (asection *exidx_sec, int adjust)
8961 if (!exidx_sec->rawsize)
8962 exidx_sec->rawsize = exidx_sec->size;
8964 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
8965 out_sec = exidx_sec->output_section;
8966 /* Adjust size of output section. */
8967 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
8970 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
8972 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
8974 struct _arm_elf_section_data *exidx_arm_data;
8976 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
8977 add_unwind_table_edit (
8978 &exidx_arm_data->u.exidx.unwind_edit_list,
8979 &exidx_arm_data->u.exidx.unwind_edit_tail,
8980 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
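/* Each .ARM.exidx entry is an 8-byte pair: a prel31 reference to the code
   it covers, plus either EXIDX_CANTUNWIND (1), an inlined unwind word
   (bit 31 set) or a prel31 pointer into .ARM.extab -- hence the fixed
   8-byte size adjustment below for the marker added above.  */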
8982 adjust_exidx_size (exidx_sec, 8);
8985 /* Scan .ARM.exidx tables, and create a list describing edits which should be
8986 made to those tables, such that:
8988 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
8989 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
8990 codes which have been inlined into the index).
8992 The edits are applied when the tables are written
8993 (in elf32_arm_write_section).
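/* For illustration (hypothetical layout): if three consecutive functions
   all have EXIDX_CANTUNWIND entries, the last two are deleted and the
   first entry covers all three; a final text section with no unwind data
   gets a terminating EXIDX_CANTUNWIND marker appended after it.  */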
8997 elf32_arm_fix_exidx_coverage (asection **text_section_order,
8998 unsigned int num_text_sections,
8999 struct bfd_link_info *info)
9002 unsigned int last_second_word = 0, i;
9003 asection *last_exidx_sec = NULL;
9004 asection *last_text_sec = NULL;
9005 int last_unwind_type = -1;
9007 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9009 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9013 for (sec = inp->sections; sec != NULL; sec = sec->next)
9015 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9016 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9018 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9021 if (elf_sec->linked_to)
9023 Elf_Internal_Shdr *linked_hdr
9024 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9025 struct _arm_elf_section_data *linked_sec_arm_data
9026 = get_arm_elf_section_data (linked_hdr->bfd_section);
9028 if (linked_sec_arm_data == NULL)
9031 /* Link this .ARM.exidx section back from the text section it
9033 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9038 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9039 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9040 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
9043 for (i = 0; i < num_text_sections; i++)
9045 asection *sec = text_section_order[i];
9046 asection *exidx_sec;
9047 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9048 struct _arm_elf_section_data *exidx_arm_data;
9049 bfd_byte *contents = NULL;
9050 int deleted_exidx_bytes = 0;
9052 arm_unwind_table_edit *unwind_edit_head = NULL;
9053 arm_unwind_table_edit *unwind_edit_tail = NULL;
9054 Elf_Internal_Shdr *hdr;
9057 if (arm_data == NULL)
9060 exidx_sec = arm_data->u.text.arm_exidx_sec;
9061 if (exidx_sec == NULL)
9063 /* Section has no unwind data. */
9064 if (last_unwind_type == 0 || !last_exidx_sec)
9067 /* Ignore zero sized sections. */
9071 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9072 last_unwind_type = 0;
9076 /* Skip /DISCARD/ sections. */
9077 if (bfd_is_abs_section (exidx_sec->output_section))
9080 hdr = &elf_section_data (exidx_sec)->this_hdr;
9081 if (hdr->sh_type != SHT_ARM_EXIDX)
9084 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9085 if (exidx_arm_data == NULL)
9088 ibfd = exidx_sec->owner;
9090 if (hdr->contents != NULL)
9091 contents = hdr->contents;
9092 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9096 for (j = 0; j < hdr->sh_size; j += 8)
9098 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9102 /* An EXIDX_CANTUNWIND entry. */
9103 if (second_word == 1)
9105 if (last_unwind_type == 0)
9109 /* Inlined unwinding data. Merge if equal to previous. */
9110 else if ((second_word & 0x80000000) != 0)
9112 if (last_second_word == second_word && last_unwind_type == 1)
9115 last_second_word = second_word;
9117 /* Normal table entry. In theory we could merge these too,
9118 but duplicate entries are likely to be much less common. */
9124 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9125 DELETE_EXIDX_ENTRY, NULL, j / 8);
9127 deleted_exidx_bytes += 8;
9130 last_unwind_type = unwind_type;
9133 /* Free contents if we allocated it ourselves. */
9134 if (contents != hdr->contents)
9137 /* Record edits to be applied later (in elf32_arm_write_section). */
9138 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9139 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9141 if (deleted_exidx_bytes > 0)
9142 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9144 last_exidx_sec = exidx_sec;
9145 last_text_sec = sec;
9148 /* Add terminating CANTUNWIND entry. */
9149 if (last_exidx_sec && last_unwind_type != 0)
9150 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9156 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9157 bfd *ibfd, const char *name)
9159 asection *sec, *osec;
9161 sec = bfd_get_section_by_name (ibfd, name);
9162 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9165 osec = sec->output_section;
9166 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9169 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9170 sec->output_offset, sec->size))
9177 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9179 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9181 /* Invoke the regular ELF backend linker to do all the work. */
9182 if (!bfd_elf_final_link (abfd, info))
9185 /* Write out any glue sections now that we have created all the
9187 if (globals->bfd_of_glue_owner != NULL)
9189 if (! elf32_arm_output_glue_section (info, abfd,
9190 globals->bfd_of_glue_owner,
9191 ARM2THUMB_GLUE_SECTION_NAME))
9194 if (! elf32_arm_output_glue_section (info, abfd,
9195 globals->bfd_of_glue_owner,
9196 THUMB2ARM_GLUE_SECTION_NAME))
9199 if (! elf32_arm_output_glue_section (info, abfd,
9200 globals->bfd_of_glue_owner,
9201 VFP11_ERRATUM_VENEER_SECTION_NAME))
9204 if (! elf32_arm_output_glue_section (info, abfd,
9205 globals->bfd_of_glue_owner,
9206 ARM_BX_GLUE_SECTION_NAME))
9213 /* Set the right machine number. */
9216 elf32_arm_object_p (bfd *abfd)
9220 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9222 if (mach != bfd_mach_arm_unknown)
9223 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9225 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9226 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9229 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9234 /* Function to keep ARM specific flags in the ELF header. */
9237 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9239 if (elf_flags_init (abfd)
9240 && elf_elfheader (abfd)->e_flags != flags)
9242 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9244 if (flags & EF_ARM_INTERWORK)
9245 (*_bfd_error_handler)
9246 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9250 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9256 elf_elfheader (abfd)->e_flags = flags;
9257 elf_flags_init (abfd) = TRUE;
9263 /* Copy backend specific data from one object module to another. */
9266 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9271 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9274 in_flags = elf_elfheader (ibfd)->e_flags;
9275 out_flags = elf_elfheader (obfd)->e_flags;
9277 if (elf_flags_init (obfd)
9278 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9279 && in_flags != out_flags)
9281 /* Cannot mix APCS26 and APCS32 code. */
9282 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9285 /* Cannot mix float APCS and non-float APCS code. */
9286 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9289 /* If the src and dest have different interworking flags
9290 then turn off the interworking bit. */
9291 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9293 if (out_flags & EF_ARM_INTERWORK)
9295 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9298 in_flags &= ~EF_ARM_INTERWORK;
9301 /* Likewise for PIC, though don't warn for this case. */
9302 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9303 in_flags &= ~EF_ARM_PIC;
9306 elf_elfheader (obfd)->e_flags = in_flags;
9307 elf_flags_init (obfd) = TRUE;
9309 /* Also copy the EI_OSABI field. */
9310 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9311 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9313 /* Copy object attributes. */
9314 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9319 /* Values for Tag_ABI_PCS_R9_use. */
9328 /* Values for Tag_ABI_PCS_RW_data. */
9331 AEABI_PCS_RW_data_absolute,
9332 AEABI_PCS_RW_data_PCrel,
9333 AEABI_PCS_RW_data_SBrel,
9334 AEABI_PCS_RW_data_unused
9337 /* Values for Tag_ABI_enum_size. */
9343 AEABI_enum_forced_wide
9346 /* Determine whether an object attribute tag takes an integer, a
9350 elf32_arm_obj_attrs_arg_type (int tag)
9352 if (tag == Tag_compatibility)
9353 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9354 else if (tag == Tag_nodefaults)
9355 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9356 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9357 return ATTR_TYPE_FLAG_STR_VAL;
9359 return ATTR_TYPE_FLAG_INT_VAL;
9361 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9364 /* The ABI defines that Tag_conformance should be emitted first, and that
9365 Tag_nodefaults should be second (if either is defined). This sets those
9366 two positions, and bumps up the position of all the remaining tags to
9369 elf32_arm_obj_attrs_order (int num)
9372 return Tag_conformance;
9374 return Tag_nodefaults;
9375 if ((num - 2) < Tag_nodefaults)
9377 if ((num - 1) < Tag_conformance)
9382 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9383 Returns -1 if no architecture could be read. */
9386 get_secondary_compatible_arch (bfd *abfd)
9388 obj_attribute *attr =
9389 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9391 /* Note: the tag and its argument below are uleb128 values, though
9392 currently-defined values fit in one byte for each. */
9394 && attr->s[0] == Tag_CPU_arch
9395 && (attr->s[1] & 128) != 128
9399 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9403 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9404 The tag is removed if ARCH is -1. */
9407 set_secondary_compatible_arch (bfd *abfd, int arch)
9409 obj_attribute *attr =
9410 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9418 /* Note: the tag and its argument below are uleb128 values, though
9419 currently-defined values fit in one byte for each. */
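/* The three bytes allocated below hold { Tag_CPU_arch, arch, 0 }: a single
   (tag, value) pair plus a NUL terminator, matching what
   get_secondary_compatible_arch reads back above.  */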
9421 attr->s = bfd_alloc (abfd, 3);
9422 attr->s[0] = Tag_CPU_arch;
9427 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9431 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9432 int newtag, int secondary_compat)
9434 #define T(X) TAG_CPU_ARCH_##X
9435 int tagl, tagh, result;
9438 T(V6T2), /* PRE_V4. */
9442 T(V6T2), /* V5TE. */
9443 T(V6T2), /* V5TEJ. */
9450 T(V6K), /* PRE_V4. */
9455 T(V6K), /* V5TEJ. */
9457 T(V6KZ), /* V6KZ. */
9463 T(V7), /* PRE_V4. */
9482 T(V6K), /* V5TEJ. */
9484 T(V6KZ), /* V6KZ. */
9497 T(V6K), /* V5TEJ. */
9499 T(V6KZ), /* V6KZ. */
9503 T(V6S_M), /* V6_M. */
9504 T(V6S_M) /* V6S_M. */
9506 const int v4t_plus_v6_m[] =
9512 T(V5TE), /* V5TE. */
9513 T(V5TEJ), /* V5TEJ. */
9515 T(V6KZ), /* V6KZ. */
9516 T(V6T2), /* V6T2. */
9519 T(V6_M), /* V6_M. */
9520 T(V6S_M), /* V6S_M. */
9521 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9530 /* Pseudo-architecture. */
9534 /* Check we've not got a higher architecture than we know about. */
9536 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9538 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9542 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9544 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9545 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9546 oldtag = T(V4T_PLUS_V6_M);
9548 /* And override the new tag if we have a Tag_also_compatible_with on the
9551 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9552 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9553 newtag = T(V4T_PLUS_V6_M);
9555 tagl = (oldtag < newtag) ? oldtag : newtag;
9556 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9558 /* Architectures before V6KZ add features monotonically. */
9559 if (tagh <= TAG_CPU_ARCH_V6KZ)
9562 result = comb[tagh - T(V6T2)][tagl];
9564 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9565 as the canonical version. */
9566 if (result == T(V4T_PLUS_V6_M))
9569 *secondary_compat_out = T(V6_M);
9572 *secondary_compat_out = -1;
9576 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9577 ibfd, oldtag, newtag);
9585 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9586 are conflicting attributes. */
9589 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9591 obj_attribute *in_attr;
9592 obj_attribute *out_attr;
9593 obj_attribute_list *in_list;
9594 obj_attribute_list *out_list;
9595 obj_attribute_list **out_listp;
9596 /* Some tags have 0 = don't care, 1 = strong requirement,
9597 2 = weak requirement. */
9598 static const int order_021[3] = {0, 2, 1};
9599 /* For use with Tag_VFP_arch. */
9600 static const int order_01243[5] = {0, 1, 2, 4, 3};
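/* A note on the orderings above (assuming the 2009-era attribute values):
   order_021 ranks 0 (don't care) < 2 (weak) < 1 (strong requirement), and
   order_01243 ranks Tag_VFP_arch so that VFPv3 (3) outranks VFPv3-D16 (4);
   values beyond each table are simply compared numerically as a
   future-proofing fallback.  */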
9602 bfd_boolean result = TRUE;
9604 /* Skip the linker stubs file. This preserves previous behavior
9605 of accepting unknown attributes in the first input file - but
9607 if (ibfd->flags & BFD_LINKER_CREATED)
9610 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9612 /* This is the first object. Copy the attributes. */
9613 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9615 /* Use the Tag_null value to indicate the attributes have been
9617 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9622 in_attr = elf_known_obj_attributes_proc (ibfd);
9623 out_attr = elf_known_obj_attributes_proc (obfd);
9624 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9625 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9627 /* Ignore mismatches if the object doesn't use floating point. */
9628 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9629 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9630 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9633 (_("error: %B uses VFP register arguments, %B does not"),
9639 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9641 /* Merge this attribute with existing attributes. */
9644 case Tag_CPU_raw_name:
9646 /* These are merged after Tag_CPU_arch. */
9649 case Tag_ABI_optimization_goals:
9650 case Tag_ABI_FP_optimization_goals:
9651 /* Use the first value seen. */
9656 int secondary_compat = -1, secondary_compat_out = -1;
9657 unsigned int saved_out_attr = out_attr[i].i;
9658 static const char *name_table[] = {
9659 /* These aren't real CPU names, but we can't guess
9660 that from the architecture version alone. */
9676 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9677 secondary_compat = get_secondary_compatible_arch (ibfd);
9678 secondary_compat_out = get_secondary_compatible_arch (obfd);
9679 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9680 &secondary_compat_out,
9683 set_secondary_compatible_arch (obfd, secondary_compat_out);
9685 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9686 if (out_attr[i].i == saved_out_attr)
9687 ; /* Leave the names alone. */
9688 else if (out_attr[i].i == in_attr[i].i)
9690 /* The output architecture has been changed to match the
9691 input architecture. Use the input names. */
9692 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9693 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9695 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9696 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9701 out_attr[Tag_CPU_name].s = NULL;
9702 out_attr[Tag_CPU_raw_name].s = NULL;
9705 /* If we still don't have a value for Tag_CPU_name,
9706 make one up now. Tag_CPU_raw_name remains blank. */
9707 if (out_attr[Tag_CPU_name].s == NULL
9708 && out_attr[i].i < ARRAY_SIZE (name_table))
9709 out_attr[Tag_CPU_name].s =
9710 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9714 case Tag_ARM_ISA_use:
9715 case Tag_THUMB_ISA_use:
9717 case Tag_Advanced_SIMD_arch:
9718 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9719 case Tag_ABI_FP_rounding:
9720 case Tag_ABI_FP_exceptions:
9721 case Tag_ABI_FP_user_exceptions:
9722 case Tag_ABI_FP_number_model:
9723 case Tag_VFP_HP_extension:
9724 case Tag_CPU_unaligned_access:
9726 case Tag_Virtualization_use:
9727 case Tag_MPextension_use:
9728 /* Use the largest value specified. */
9729 if (in_attr[i].i > out_attr[i].i)
9730 out_attr[i].i = in_attr[i].i;
9733 case Tag_ABI_align8_preserved:
9734 case Tag_ABI_PCS_RO_data:
9735 /* Use the smallest value specified. */
9736 if (in_attr[i].i < out_attr[i].i)
9737 out_attr[i].i = in_attr[i].i;
9740 case Tag_ABI_align8_needed:
9741 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9742 && (in_attr[Tag_ABI_align8_preserved].i == 0
9743 || out_attr[Tag_ABI_align8_preserved].i == 0))
9745 /* This error message should be enabled once all non-conformant
9746 binaries in the toolchain have had the attributes set
9749 (_("error: %B: 8-byte data alignment conflicts with %B"),
9754 case Tag_ABI_FP_denormal:
9755 case Tag_ABI_PCS_GOT_use:
9756 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9757 value if greater than 2 (for future-proofing). */
9758 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9759 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9760 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9761 out_attr[i].i = in_attr[i].i;
9765 case Tag_CPU_arch_profile:
9766 if (out_attr[i].i != in_attr[i].i)
9768 /* 0 will merge with anything.
9769 'A' and 'S' merge to 'A'.
9770 'R' and 'S' merge to 'R'.
9771 'M' and 'A|R|S' is an error. */
9772 if (out_attr[i].i == 0
9773 || (out_attr[i].i == 'S'
9774 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9775 out_attr[i].i = in_attr[i].i;
9776 else if (in_attr[i].i == 0
9777 || (in_attr[i].i == 'S'
9778 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9783 (_("error: %B: Conflicting architecture profiles %c/%c"),
9785 in_attr[i].i ? in_attr[i].i : '0',
9786 out_attr[i].i ? out_attr[i].i : '0');
9792 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9793 largest value if greater than 4 (for future-proofing). */
9794 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9795 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9796 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9797 out_attr[i].i = in_attr[i].i;
9799 case Tag_PCS_config:
9800 if (out_attr[i].i == 0)
9801 out_attr[i].i = in_attr[i].i;
9802 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9804 /* It's sometimes ok to mix different configs, so this is only
9807 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9810 case Tag_ABI_PCS_R9_use:
9811 if (in_attr[i].i != out_attr[i].i
9812 && out_attr[i].i != AEABI_R9_unused
9813 && in_attr[i].i != AEABI_R9_unused)
9816 (_("error: %B: Conflicting use of R9"), ibfd);
9819 if (out_attr[i].i == AEABI_R9_unused)
9820 out_attr[i].i = in_attr[i].i;
9822 case Tag_ABI_PCS_RW_data:
9823 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9824 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9825 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9828 (_("error: %B: SB relative addressing conflicts with use of R9"),
9832 /* Use the smallest value specified. */
9833 if (in_attr[i].i < out_attr[i].i)
9834 out_attr[i].i = in_attr[i].i;
9836 case Tag_ABI_PCS_wchar_t:
9837 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9838 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9841 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9842 ibfd, in_attr[i].i, out_attr[i].i);
9844 else if (in_attr[i].i && !out_attr[i].i)
9845 out_attr[i].i = in_attr[i].i;
9847 case Tag_ABI_enum_size:
9848 if (in_attr[i].i != AEABI_enum_unused)
9850 if (out_attr[i].i == AEABI_enum_unused
9851 || out_attr[i].i == AEABI_enum_forced_wide)
9853 /* The existing object is compatible with anything.
9854 Use whatever requirements the new object has. */
9855 out_attr[i].i = in_attr[i].i;
9857 else if (in_attr[i].i != AEABI_enum_forced_wide
9858 && out_attr[i].i != in_attr[i].i
9859 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9861 static const char *aeabi_enum_names[] =
9862 { "", "variable-size", "32-bit", "" };
9863 const char *in_name =
9864 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9865 ? aeabi_enum_names[in_attr[i].i]
9867 const char *out_name =
9868 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9869 ? aeabi_enum_names[out_attr[i].i]
9872 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9873 ibfd, in_name, out_name);
9877 case Tag_ABI_VFP_args:
9880 case Tag_ABI_WMMX_args:
9881 if (in_attr[i].i != out_attr[i].i)
9884 (_("error: %B uses iWMMXt register arguments, %B does not"),
9889 case Tag_compatibility:
9890 /* Merged in target-independent code. */
9892 case Tag_ABI_HardFP_use:
9893 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
9894 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
9895 || (in_attr[i].i == 2 && out_attr[i].i == 1))
9897 else if (in_attr[i].i > out_attr[i].i)
9898 out_attr[i].i = in_attr[i].i;
9900 case Tag_ABI_FP_16bit_format:
9901 if (in_attr[i].i != 0 && out_attr[i].i != 0)
9903 if (in_attr[i].i != out_attr[i].i)
9906 (_("error: fp16 format mismatch between %B and %B"),
9911 if (in_attr[i].i != 0)
9912 out_attr[i].i = in_attr[i].i;
9915 case Tag_nodefaults:
9916 /* This tag is set if it exists, but the value is unused (and is
9917 typically zero). We don't actually need to do anything here -
9918 the merge happens automatically when the type flags are merged
9921 case Tag_also_compatible_with:
9922 /* Already done in Tag_CPU_arch. */
9924 case Tag_conformance:
9925 /* Keep the attribute if it matches. Throw it away otherwise.
9926 No attribute means no claim to conform. */
9927 if (!in_attr[i].s || !out_attr[i].s
9928 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
9929 out_attr[i].s = NULL;
9934 bfd *err_bfd = NULL;
9936 /* The "known_obj_attributes" table does contain some undefined
9937 attributes. Ensure that they are unused. */
9938 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
9940 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
9943 if (err_bfd != NULL)
9945 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9949 (_("%B: Unknown mandatory EABI object attribute %d"),
9951 bfd_set_error (bfd_error_bad_value);
9957 (_("Warning: %B: Unknown EABI object attribute %d"),
9962 /* Only pass on attributes that match in both inputs. */
9963 if (in_attr[i].i != out_attr[i].i
9964 || in_attr[i].s != out_attr[i].s
9965 || (in_attr[i].s != NULL && out_attr[i].s != NULL
9966 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
9969 out_attr[i].s = NULL;
9974 /* If out_attr was copied from in_attr then it won't have a type yet. */
9975 if (in_attr[i].type && !out_attr[i].type)
9976 out_attr[i].type = in_attr[i].type;
9979 /* Merge Tag_compatibility attributes and any common GNU ones. */
9980 _bfd_elf_merge_object_attributes (ibfd, obfd);
9982 /* Check for any attributes not known on ARM. */
9983 in_list = elf_other_obj_attributes_proc (ibfd);
9984 out_listp = &elf_other_obj_attributes_proc (obfd);
9985 out_list = *out_listp;
9987 for (; in_list || out_list; )
9989 bfd *err_bfd = NULL;
9992 /* The tags for each list are in numerical order. */
9993 /* If the tags are equal, then merge. */
9994 if (out_list && (!in_list || in_list->tag > out_list->tag))
9996 /* This attribute only exists in obfd. We can't merge, and we don't
9997 know what the tag means, so delete it. */
9999 err_tag = out_list->tag;
10000 *out_listp = out_list->next;
10001 out_list = *out_listp;
10003 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10005 /* This attribute only exists in ibfd. We can't merge, and we don't
10006 know what the tag means, so ignore it. */
10008 err_tag = in_list->tag;
10009 in_list = in_list->next;
10011 else /* The tags are equal. */
10013 /* At present, all attributes in the list are unknown, and
10014 therefore can't be merged meaningfully. */
10016 err_tag = out_list->tag;
10018 /* Only pass on attributes that match in both inputs. */
10019 if (in_list->attr.i != out_list->attr.i
10020 || in_list->attr.s != out_list->attr.s
10021 || (in_list->attr.s && out_list->attr.s
10022 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10024 /* No match. Delete the attribute. */
10025 *out_listp = out_list->next;
10026 out_list = *out_listp;
10030 /* Matched. Keep the attribute and move to the next. */
10031 out_list = out_list->next;
10032 in_list = in_list->next;
10038 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10039 if ((err_tag & 127) < 64)
10042 (_("%B: Unknown mandatory EABI object attribute %d"),
10044 bfd_set_error (bfd_error_bad_value);
10050 (_("Warning: %B: Unknown EABI object attribute %d"),
10059 /* Return TRUE if the two EABI versions are compatible. */
10062 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10064 /* v4 and v5 are the same spec before and after it was released,
10065 so allow mixing them. */
10066 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10067 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10070 return (iver == over);
10073 /* Merge backend specific data from an object file to the output
10074 object file when linking. */
10077 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
10079 flagword out_flags;
10081 bfd_boolean flags_compatible = TRUE;
10084 /* Check if we have the same endianness. */
10085 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
10088 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10091 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
10094 /* The input BFD must have had its flags initialised. */
10095 /* The following seems bogus to me -- The flags are initialized in
10096 the assembler but I don't think an elf_flags_init field is
10097 written into the object. */
10098 /* BFD_ASSERT (elf_flags_init (ibfd)); */
10100 in_flags = elf_elfheader (ibfd)->e_flags;
10101 out_flags = elf_elfheader (obfd)->e_flags;
10103 /* In theory there is no reason why we couldn't handle this. However
10104 in practice it isn't even close to working and there is no real
10105 reason to want it. */
10106 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
10107 && !(ibfd->flags & DYNAMIC)
10108 && (in_flags & EF_ARM_BE8))
10110 _bfd_error_handler (_("error: %B is already in final BE8 format"),
10115 if (!elf_flags_init (obfd))
10117 /* If the input is the default architecture and had the default
10118 flags then do not bother setting the flags for the output
10119 architecture, instead allow future merges to do this. If no
10120 future merges ever set these flags then they will retain their
10121 uninitialised values, which, unsurprisingly, correspond
10122 to the default values. */
10123 if (bfd_get_arch_info (ibfd)->the_default
10124 && elf_elfheader (ibfd)->e_flags == 0)
10127 elf_flags_init (obfd) = TRUE;
10128 elf_elfheader (obfd)->e_flags = in_flags;
10130 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
10131 && bfd_get_arch_info (obfd)->the_default)
10132 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
10137 /* Determine what should happen if the input ARM architecture
10138 does not match the output ARM architecture. */
10139 if (! bfd_arm_merge_machines (ibfd, obfd))
10142 /* Identical flags must be compatible. */
10143 if (in_flags == out_flags)
10146 /* Check to see if the input BFD actually contains any sections. If
10147 not, its flags may not have been initialised either, but it
10148 cannot actually cause any incompatibility. Do not short-circuit
10149 dynamic objects; their section list may be emptied by
10150 elf_link_add_object_symbols.
10152 Also check to see if there are no code sections in the input.
10153 In this case there is no need to check for code specific flags.
10154 XXX - do we need to worry about floating-point format compatibility
10155 in data sections ? */
10156 if (!(ibfd->flags & DYNAMIC))
10158 bfd_boolean null_input_bfd = TRUE;
10159 bfd_boolean only_data_sections = TRUE;
10161 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
10163 /* Ignore synthetic glue sections. */
10164 if (strcmp (sec->name, ".glue_7")
10165 && strcmp (sec->name, ".glue_7t"))
10167 if ((bfd_get_section_flags (ibfd, sec)
10168 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10169 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10170 only_data_sections = FALSE;
10172 null_input_bfd = FALSE;
10177 if (null_input_bfd || only_data_sections)
10181 /* Complain about various flag mismatches. */
10182 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
10183 EF_ARM_EABI_VERSION (out_flags)))
10186 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
10188 (in_flags & EF_ARM_EABIMASK) >> 24,
10189 (out_flags & EF_ARM_EABIMASK) >> 24);
10193 /* Not sure what needs to be checked for EABI versions >= 1. */
10194 /* VxWorks libraries do not use these flags. */
10195 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
10196 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
10197 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
10199 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10202 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
10204 in_flags & EF_ARM_APCS_26 ? 26 : 32,
10205 out_flags & EF_ARM_APCS_26 ? 26 : 32);
10206 flags_compatible = FALSE;
10209 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10211 if (in_flags & EF_ARM_APCS_FLOAT)
10213 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
10217 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
10220 flags_compatible = FALSE;
10223 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
10225 if (in_flags & EF_ARM_VFP_FLOAT)
10227 (_("error: %B uses VFP instructions, whereas %B does not"),
10231 (_("error: %B uses FPA instructions, whereas %B does not"),
10234 flags_compatible = FALSE;
10237 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
10239 if (in_flags & EF_ARM_MAVERICK_FLOAT)
10241 (_("error: %B uses Maverick instructions, whereas %B does not"),
10245 (_("error: %B does not use Maverick instructions, whereas %B does"),
10248 flags_compatible = FALSE;
10251 #ifdef EF_ARM_SOFT_FLOAT
10252 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
10254 /* We can allow interworking between code that is VFP format
10255 layout, and uses either soft float or integer regs for
10256 passing floating point arguments and results. We already
10257 know that the APCS_FLOAT flags match; similarly for VFP
10259 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
10260 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
10262 if (in_flags & EF_ARM_SOFT_FLOAT)
10264 (_("error: %B uses software FP, whereas %B uses hardware FP"),
10268 (_("error: %B uses hardware FP, whereas %B uses software FP"),
10271 flags_compatible = FALSE;
10276 /* Interworking mismatch is only a warning. */
10277 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10279 if (in_flags & EF_ARM_INTERWORK)
10282 (_("Warning: %B supports interworking, whereas %B does not"),
10288 (_("Warning: %B does not support interworking, whereas %B does"),
10294 return flags_compatible;
10297 /* Display the flags field. */
10300 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10302 FILE * file = (FILE *) ptr;
10303 unsigned long flags;
10305 BFD_ASSERT (abfd != NULL && ptr != NULL);
10307 /* Print normal ELF private data. */
10308 _bfd_elf_print_private_bfd_data (abfd, ptr);
10310 flags = elf_elfheader (abfd)->e_flags;
10311 /* Ignore init flag - it may not be set, despite the flags field
10312 containing valid data. */
10314 /* xgettext:c-format */
10315 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10317 switch (EF_ARM_EABI_VERSION (flags))
10319 case EF_ARM_EABI_UNKNOWN:
10320 /* The following flag bits are GNU extensions and not part of the
10321 official ARM ELF extended ABI. Hence they are only decoded if
10322 the EABI version is not set. */
10323 if (flags & EF_ARM_INTERWORK)
10324 fprintf (file, _(" [interworking enabled]"));
10326 if (flags & EF_ARM_APCS_26)
10327 fprintf (file, " [APCS-26]");
10329 fprintf (file, " [APCS-32]");
10331 if (flags & EF_ARM_VFP_FLOAT)
10332 fprintf (file, _(" [VFP float format]"));
10333 else if (flags & EF_ARM_MAVERICK_FLOAT)
10334 fprintf (file, _(" [Maverick float format]"));
10336 fprintf (file, _(" [FPA float format]"));
10338 if (flags & EF_ARM_APCS_FLOAT)
10339 fprintf (file, _(" [floats passed in float registers]"));
10341 if (flags & EF_ARM_PIC)
10342 fprintf (file, _(" [position independent]"));
10344 if (flags & EF_ARM_NEW_ABI)
10345 fprintf (file, _(" [new ABI]"));
10347 if (flags & EF_ARM_OLD_ABI)
10348 fprintf (file, _(" [old ABI]"));
10350 if (flags & EF_ARM_SOFT_FLOAT)
10351 fprintf (file, _(" [software FP]"));
10353 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10354 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10355 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10356 | EF_ARM_MAVERICK_FLOAT);
10359 case EF_ARM_EABI_VER1:
10360 fprintf (file, _(" [Version1 EABI]"));
10362 if (flags & EF_ARM_SYMSARESORTED)
10363 fprintf (file, _(" [sorted symbol table]"));
10365 fprintf (file, _(" [unsorted symbol table]"));
10367 flags &= ~ EF_ARM_SYMSARESORTED;
10370 case EF_ARM_EABI_VER2:
10371 fprintf (file, _(" [Version2 EABI]"));
10373 if (flags & EF_ARM_SYMSARESORTED)
10374 fprintf (file, _(" [sorted symbol table]"));
10376 fprintf (file, _(" [unsorted symbol table]"));
10378 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10379 fprintf (file, _(" [dynamic symbols use segment index]"));
10381 if (flags & EF_ARM_MAPSYMSFIRST)
10382 fprintf (file, _(" [mapping symbols precede others]"));
10384 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10385 | EF_ARM_MAPSYMSFIRST);
10388 case EF_ARM_EABI_VER3:
10389 fprintf (file, _(" [Version3 EABI]"));
10392 case EF_ARM_EABI_VER4:
10393 fprintf (file, _(" [Version4 EABI]"));
10396 case EF_ARM_EABI_VER5:
10397 fprintf (file, _(" [Version5 EABI]"));
10399 if (flags & EF_ARM_BE8)
10400 fprintf (file, _(" [BE8]"));
10402 if (flags & EF_ARM_LE8)
10403 fprintf (file, _(" [LE8]"));
10405 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10409 fprintf (file, _(" <EABI version unrecognised>"));
10413 flags &= ~ EF_ARM_EABIMASK;
10415 if (flags & EF_ARM_RELEXEC)
10416 fprintf (file, _(" [relocatable executable]"));
10418 if (flags & EF_ARM_HASENTRY)
10419 fprintf (file, _(" [has entry point]"));
10421 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10424 fprintf (file, _("<Unrecognised flag bits set>"));
10426 fputc ('\n', file);
10432 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10434 switch (ELF_ST_TYPE (elf_sym->st_info))
10436 case STT_ARM_TFUNC:
10437 return ELF_ST_TYPE (elf_sym->st_info);
10439 case STT_ARM_16BIT:
10440 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10441 This allows us to distinguish between data used by Thumb instructions
10442 and non-data (which is probably code) inside Thumb regions of an
10444 if (type != STT_OBJECT && type != STT_TLS)
10445 return ELF_ST_TYPE (elf_sym->st_info);
10456 elf32_arm_gc_mark_hook (asection *sec,
10457 struct bfd_link_info *info,
10458 Elf_Internal_Rela *rel,
10459 struct elf_link_hash_entry *h,
10460 Elf_Internal_Sym *sym)
10463 switch (ELF32_R_TYPE (rel->r_info))
10465 case R_ARM_GNU_VTINHERIT:
10466 case R_ARM_GNU_VTENTRY:
10470 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10473 /* Update the got entry reference counts for the section being removed. */
10476 elf32_arm_gc_sweep_hook (bfd * abfd,
10477 struct bfd_link_info * info,
10479 const Elf_Internal_Rela * relocs)
10481 Elf_Internal_Shdr *symtab_hdr;
10482 struct elf_link_hash_entry **sym_hashes;
10483 bfd_signed_vma *local_got_refcounts;
10484 const Elf_Internal_Rela *rel, *relend;
10485 struct elf32_arm_link_hash_table * globals;
10487 if (info->relocatable)
10490 globals = elf32_arm_hash_table (info);
10492 elf_section_data (sec)->local_dynrel = NULL;
10494 symtab_hdr = & elf_symtab_hdr (abfd);
10495 sym_hashes = elf_sym_hashes (abfd);
10496 local_got_refcounts = elf_local_got_refcounts (abfd);
10498 check_use_blx (globals);
10500 relend = relocs + sec->reloc_count;
10501 for (rel = relocs; rel < relend; rel++)
10503 unsigned long r_symndx;
10504 struct elf_link_hash_entry *h = NULL;
10507 r_symndx = ELF32_R_SYM (rel->r_info);
10508 if (r_symndx >= symtab_hdr->sh_info)
10510 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10511 while (h->root.type == bfd_link_hash_indirect
10512 || h->root.type == bfd_link_hash_warning)
10513 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10516 r_type = ELF32_R_TYPE (rel->r_info);
10517 r_type = arm_real_reloc_type (globals, r_type);
10521 case R_ARM_GOT_PREL:
10522 case R_ARM_TLS_GD32:
10523 case R_ARM_TLS_IE32:
10526 if (h->got.refcount > 0)
10527 h->got.refcount -= 1;
10529 else if (local_got_refcounts != NULL)
10531 if (local_got_refcounts[r_symndx] > 0)
10532 local_got_refcounts[r_symndx] -= 1;
10536 case R_ARM_TLS_LDM32:
10537 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10541 case R_ARM_ABS32_NOI:
10543 case R_ARM_REL32_NOI:
10549 case R_ARM_THM_CALL:
10550 case R_ARM_THM_JUMP24:
10551 case R_ARM_THM_JUMP19:
10552 case R_ARM_MOVW_ABS_NC:
10553 case R_ARM_MOVT_ABS:
10554 case R_ARM_MOVW_PREL_NC:
10555 case R_ARM_MOVT_PREL:
10556 case R_ARM_THM_MOVW_ABS_NC:
10557 case R_ARM_THM_MOVT_ABS:
10558 case R_ARM_THM_MOVW_PREL_NC:
10559 case R_ARM_THM_MOVT_PREL:
10560 /* Should the interworking branches be here also? */
10564 struct elf32_arm_link_hash_entry *eh;
10565 struct elf32_arm_relocs_copied **pp;
10566 struct elf32_arm_relocs_copied *p;
10568 eh = (struct elf32_arm_link_hash_entry *) h;
10570 if (h->plt.refcount > 0)
10572 h->plt.refcount -= 1;
10573 if (r_type == R_ARM_THM_CALL)
10574 eh->plt_maybe_thumb_refcount--;
10576 if (r_type == R_ARM_THM_JUMP24
10577 || r_type == R_ARM_THM_JUMP19)
10578 eh->plt_thumb_refcount--;
10581 if (r_type == R_ARM_ABS32
10582 || r_type == R_ARM_REL32
10583 || r_type == R_ARM_ABS32_NOI
10584 || r_type == R_ARM_REL32_NOI)
10586 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10588 if (p->section == sec)
10591 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10592 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10610 /* Look through the relocs for a section during the first phase. */
10613 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10614 asection *sec, const Elf_Internal_Rela *relocs)
10616 Elf_Internal_Shdr *symtab_hdr;
10617 struct elf_link_hash_entry **sym_hashes;
10618 const Elf_Internal_Rela *rel;
10619 const Elf_Internal_Rela *rel_end;
10622 bfd_vma *local_got_offsets;
10623 struct elf32_arm_link_hash_table *htab;
10624 bfd_boolean needs_plt;
10625 unsigned long nsyms;
10627 if (info->relocatable)
10630 BFD_ASSERT (is_arm_elf (abfd));
10632 htab = elf32_arm_hash_table (info);
10635 /* Create dynamic sections for relocatable executables so that we can
10636 copy relocations. */
10637 if (htab->root.is_relocatable_executable
10638 && ! htab->root.dynamic_sections_created)
10640 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10644 dynobj = elf_hash_table (info)->dynobj;
10645 local_got_offsets = elf_local_got_offsets (abfd);
10647 symtab_hdr = & elf_symtab_hdr (abfd);
10648 sym_hashes = elf_sym_hashes (abfd);
10649 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10651 rel_end = relocs + sec->reloc_count;
10652 for (rel = relocs; rel < rel_end; rel++)
10654 struct elf_link_hash_entry *h;
10655 struct elf32_arm_link_hash_entry *eh;
10656 unsigned long r_symndx;
10659 r_symndx = ELF32_R_SYM (rel->r_info);
10660 r_type = ELF32_R_TYPE (rel->r_info);
10661 r_type = arm_real_reloc_type (htab, r_type);
10663 if (r_symndx >= nsyms
10664 /* PR 9934: It is possible to have relocations that do not
10665 refer to symbols, thus it is also possible to have an
10666 object file containing relocations but no symbol table. */
10667 && (r_symndx > 0 || nsyms > 0))
10669 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10674 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10678 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10679 while (h->root.type == bfd_link_hash_indirect
10680 || h->root.type == bfd_link_hash_warning)
10681 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10684 eh = (struct elf32_arm_link_hash_entry *) h;
10689 case R_ARM_GOT_PREL:
10690 case R_ARM_TLS_GD32:
10691 case R_ARM_TLS_IE32:
10692 /* This symbol requires a global offset table entry. */
10694 int tls_type, old_tls_type;
10698 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10699 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10700 default: tls_type = GOT_NORMAL; break;
10706 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10710 bfd_signed_vma *local_got_refcounts;
10712 /* This is a global offset table entry for a local symbol. */
10713 local_got_refcounts = elf_local_got_refcounts (abfd);
10714 if (local_got_refcounts == NULL)
10716 bfd_size_type size;
10718 size = symtab_hdr->sh_info;
10719 size *= (sizeof (bfd_signed_vma) + sizeof (char));
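/* A single allocation holds both per-symbol tables: sh_info signed GOT
   refcounts followed immediately by sh_info one-byte TLS types, which is
   why the per-symbol size above is sizeof (bfd_signed_vma) + sizeof (char)
   and why the TLS-type pointer below is derived from the same block.  */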
10720 local_got_refcounts = bfd_zalloc (abfd, size);
10721 if (local_got_refcounts == NULL)
10723 elf_local_got_refcounts (abfd) = local_got_refcounts;
10724 elf32_arm_local_got_tls_type (abfd)
10725 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10727 local_got_refcounts[r_symndx] += 1;
10728 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10731 /* We will already have issued an error message if there is a
10732 TLS / non-TLS mismatch, based on the symbol type. We don't
10733 support any linker relaxations. So just combine any TLS
10735 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10736 && tls_type != GOT_NORMAL)
10737 tls_type |= old_tls_type;
10739 if (old_tls_type != tls_type)
10742 elf32_arm_hash_entry (h)->tls_type = tls_type;
10744 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10747 /* Fall through. */
10749 case R_ARM_TLS_LDM32:
10750 if (r_type == R_ARM_TLS_LDM32)
10751 htab->tls_ldm_got.refcount++;
10752 /* Fall through. */
10754 case R_ARM_GOTOFF32:
10756 if (htab->sgot == NULL)
10758 if (htab->root.dynobj == NULL)
10759 htab->root.dynobj = abfd;
10760 if (!create_got_section (htab->root.dynobj, info))
10766 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10767 ldr __GOTT_INDEX__ offsets. */
10768 if (!htab->vxworks_p)
10770 /* Fall through. */
10777 case R_ARM_THM_CALL:
10778 case R_ARM_THM_JUMP24:
10779 case R_ARM_THM_JUMP19:
10783 case R_ARM_MOVW_ABS_NC:
10784 case R_ARM_MOVT_ABS:
10785 case R_ARM_THM_MOVW_ABS_NC:
10786 case R_ARM_THM_MOVT_ABS:
10789 (*_bfd_error_handler)
10790 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10791 abfd, elf32_arm_howto_table_1[r_type].name,
10792 (h) ? h->root.root.string : "a local symbol");
10793 bfd_set_error (bfd_error_bad_value);
10797 /* Fall through. */
10799 case R_ARM_ABS32_NOI:
10801 case R_ARM_REL32_NOI:
10802 case R_ARM_MOVW_PREL_NC:
10803 case R_ARM_MOVT_PREL:
10804 case R_ARM_THM_MOVW_PREL_NC:
10805 case R_ARM_THM_MOVT_PREL:
10809 /* Should the interworking branches be listed here? */
10812 /* If this reloc is in a read-only section, we might
10813 need a copy reloc. We can't check reliably at this
10814 stage whether the section is read-only, as input
10815 sections have not yet been mapped to output sections.
10816 Tentatively set the flag for now, and correct in
10817 adjust_dynamic_symbol. */
10819 h->non_got_ref = 1;
10821 /* We may need a .plt entry if the function this reloc
10822 refers to is in a different object. We can't tell for
10823 sure yet, because something later might force the
10828 /* If we create a PLT entry, this relocation will reference
10829 it, even if it's an ABS32 relocation. */
10830 h->plt.refcount += 1;
10832 /* It's too early to use htab->use_blx here, so we have to
10833 record possible blx references separately from
10834 relocs that definitely need a thumb stub. */
10836 if (r_type == R_ARM_THM_CALL)
10837 eh->plt_maybe_thumb_refcount += 1;
10839 if (r_type == R_ARM_THM_JUMP24
10840 || r_type == R_ARM_THM_JUMP19)
10841 eh->plt_thumb_refcount += 1;
10844 /* If we are creating a shared library or relocatable executable,
10845 and this is a reloc against a global symbol, or a non PC
10846 relative reloc against a local symbol, then we need to copy
10847 the reloc into the shared library. However, if we are linking
10848 with -Bsymbolic, we do not need to copy a reloc against a
10849 global symbol which is defined in an object we are
10850 including in the link (i.e., DEF_REGULAR is set). At
10851 this point we have not seen all the input files, so it is
10852 possible that DEF_REGULAR is not set now but will be set
10853 later (it is never cleared). We account for that
10854 possibility below by storing information in the
10855 relocs_copied field of the hash table entry. */
10856 if ((info->shared || htab->root.is_relocatable_executable)
10857 && (sec->flags & SEC_ALLOC) != 0
10858 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10859 || (h != NULL && ! h->needs_plt
10860 && (! info->symbolic || ! h->def_regular))))
10862 struct elf32_arm_relocs_copied *p, **head;
10864 /* When creating a shared object, we must copy these
10865 reloc types into the output file. We create a reloc
10866 section in dynobj and make room for this reloc. */
10867 if (sreloc == NULL)
10869 sreloc = _bfd_elf_make_dynamic_reloc_section
10870 (sec, dynobj, 2, abfd, ! htab->use_rel);
10872 if (sreloc == NULL)
10875 /* BPABI objects never have dynamic relocations mapped. */
10876 if (htab->symbian_p)
10880 flags = bfd_get_section_flags (dynobj, sreloc);
10881 flags &= ~(SEC_LOAD | SEC_ALLOC);
10882 bfd_set_section_flags (dynobj, sreloc, flags);
10886 /* If this is a global symbol, we count the number of
10887 relocations we need for this symbol. */
10890 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10894 /* Track dynamic relocs needed for local syms too.
10895 We really need local syms available to do this
10896 easily. Oh well. */
10899 Elf_Internal_Sym *isym;
10901 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
10906 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
10910 vpp = &elf_section_data (s)->local_dynrel;
10911 head = (struct elf32_arm_relocs_copied **) vpp;
10915 if (p == NULL || p->section != sec)
10917 bfd_size_type amt = sizeof *p;
10919 p = bfd_alloc (htab->root.dynobj, amt);
10929 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10935 /* This relocation describes the C++ object vtable hierarchy.
10936 Reconstruct it for later use during GC. */
10937 case R_ARM_GNU_VTINHERIT:
10938 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10942 /* This relocation describes which C++ vtable entries are actually
10943 used. Record for later use during GC. */
10944 case R_ARM_GNU_VTENTRY:
10945 BFD_ASSERT (h != NULL);
10947 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10956 /* Unwinding tables are not referenced directly. This pass marks them as
10957 required if the corresponding code section is marked. */
10960 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10961 elf_gc_mark_hook_fn gc_mark_hook)
10964 Elf_Internal_Shdr **elf_shdrp;
10967 /* Marking EH data may cause additional code sections to be marked,
10968 requiring multiple passes. */
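/* An SHT_ARM_EXIDX section's sh_link gives the section header index of
   the text section it describes, so each pass below marks an index
   section once its linked text section has been marked, repeating until
   no further sections change.  */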
10973 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10977 if (! is_arm_elf (sub))
10980 elf_shdrp = elf_elfsections (sub);
10981 for (o = sub->sections; o != NULL; o = o->next)
10983 Elf_Internal_Shdr *hdr;
10985 hdr = &elf_section_data (o)->this_hdr;
10986 if (hdr->sh_type == SHT_ARM_EXIDX
10988 && hdr->sh_link < elf_numsections (sub)
10990 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10993 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11003 /* Treat mapping symbols as special target symbols. */
11006 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11008 return bfd_is_arm_special_symbol_name (sym->name,
11009 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11012 /* This is a copy of elf_find_function() from elf.c except that
11013 ARM mapping symbols are ignored when looking for function names
11014 and STT_ARM_TFUNC is considered to be a function type. */
11017 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11018 asection * section,
11019 asymbol ** symbols,
11021 const char ** filename_ptr,
11022 const char ** functionname_ptr)
11024 const char * filename = NULL;
11025 asymbol * func = NULL;
11026 bfd_vma low_func = 0;
11029 for (p = symbols; *p != NULL; p++)
11031 elf_symbol_type *q;
11033 q = (elf_symbol_type *) *p;
11035 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11040 filename = bfd_asymbol_name (&q->symbol);
11043 case STT_ARM_TFUNC:
11045 /* Skip mapping symbols. */
11046 if ((q->symbol.flags & BSF_LOCAL)
11047 && bfd_is_arm_special_symbol_name (q->symbol.name,
11048 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11050 /* Fall through. */
11051 if (bfd_get_section (&q->symbol) == section
11052 && q->symbol.value >= low_func
11053 && q->symbol.value <= offset)
11055 func = (asymbol *) q;
11056 low_func = q->symbol.value;
11066 *filename_ptr = filename;
11067 if (functionname_ptr)
11068 *functionname_ptr = bfd_asymbol_name (func);
11074 /* Find the nearest line to a particular section and offset, for error
11075 reporting. This code is a duplicate of the code in elf.c, except
11076 that it uses arm_elf_find_function. */
11079 elf32_arm_find_nearest_line (bfd * abfd,
11080 asection * section,
11081 asymbol ** symbols,
11083 const char ** filename_ptr,
11084 const char ** functionname_ptr,
11085 unsigned int * line_ptr)
11087 bfd_boolean found = FALSE;
11089 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11091 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11092 filename_ptr, functionname_ptr,
11094 & elf_tdata (abfd)->dwarf2_find_line_info))
11096 if (!*functionname_ptr)
11097 arm_elf_find_function (abfd, section, symbols, offset,
11098 *filename_ptr ? NULL : filename_ptr,
11104 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11105 & found, filename_ptr,
11106 functionname_ptr, line_ptr,
11107 & elf_tdata (abfd)->line_info))
11110 if (found && (*functionname_ptr || *line_ptr))
11113 if (symbols == NULL)
11116 if (! arm_elf_find_function (abfd, section, symbols, offset,
11117 filename_ptr, functionname_ptr))
11125 elf32_arm_find_inliner_info (bfd * abfd,
11126 const char ** filename_ptr,
11127 const char ** functionname_ptr,
11128 unsigned int * line_ptr)
11131 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11132 functionname_ptr, line_ptr,
11133 & elf_tdata (abfd)->dwarf2_find_line_info);
11137 /* Adjust a symbol defined by a dynamic object and referenced by a
11138 regular object. The current definition is in some section of the
11139 dynamic object, but we're not including those sections. We have to
11140 change the definition to something the rest of the link can
11144 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11145 struct elf_link_hash_entry * h)
11149 struct elf32_arm_link_hash_entry * eh;
11150 struct elf32_arm_link_hash_table *globals;
11152 globals = elf32_arm_hash_table (info);
11153 dynobj = elf_hash_table (info)->dynobj;
11155 /* Make sure we know what is going on here. */
11156 BFD_ASSERT (dynobj != NULL
11158 || h->u.weakdef != NULL
11161 && !h->def_regular)));
11163 eh = (struct elf32_arm_link_hash_entry *) h;
11165 /* If this is a function, put it in the procedure linkage table. We
11166 will fill in the contents of the procedure linkage table later,
11167 when we know the address of the .got section. */
11168 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11171 if (h->plt.refcount <= 0
11172 || SYMBOL_CALLS_LOCAL (info, h)
11173 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11174 && h->root.type == bfd_link_hash_undefweak))
11176 /* This case can occur if we saw a PLT32 reloc in an input
11177 file, but the symbol was never referred to by a dynamic
11178 object, or if all references were garbage collected. In
11179 such a case, we don't actually need to build a procedure
11180 linkage table, and we can just do a PC24 reloc instead. */
11181 h->plt.offset = (bfd_vma) -1;
11182 eh->plt_thumb_refcount = 0;
11183 eh->plt_maybe_thumb_refcount = 0;
11191 /* It's possible that we incorrectly decided a .plt reloc was
11192 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11193 in check_relocs. We can't decide accurately between function
11194 and non-function syms in check_relocs; objects loaded later in
11195 the link may change h->type. So fix it now. */
11196 h->plt.offset = (bfd_vma) -1;
11197 eh->plt_thumb_refcount = 0;
11198 eh->plt_maybe_thumb_refcount = 0;
11201 /* If this is a weak symbol, and there is a real definition, the
11202 processor independent code will have arranged for us to see the
11203 real definition first, and we can just use the same value. */
11204 if (h->u.weakdef != NULL)
11206 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11207 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11208 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11209 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11213 /* If there are no non-GOT references, we do not need a copy
11215 if (!h->non_got_ref)
11218 /* This is a reference to a symbol defined by a dynamic object which
11219 is not a function. */
11221 /* If we are creating a shared library, we must presume that the
11222 only references to the symbol are via the global offset table.
11223 For such cases we need not do anything here; the relocations will
11224 be handled correctly by relocate_section. Relocatable executables
11225 can reference data in shared objects directly, so we don't need to
11226 do anything here. */
11227 if (info->shared || globals->root.is_relocatable_executable)
11232 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11233 h->root.root.string);
11237 /* We must allocate the symbol in our .dynbss section, which will
11238 become part of the .bss section of the executable. There will be
11239 an entry for this symbol in the .dynsym section. The dynamic
11240 object will contain position independent code, so all references
11241 from the dynamic object to this symbol will go through the global
11242 offset table. The dynamic linker will use the .dynsym entry to
11243 determine the address it must put in the global offset table, so
11244 both the dynamic object and the regular object will refer to the
11245 same memory location for the variable. */
11246 s = bfd_get_section_by_name (dynobj, ".dynbss");
11247 BFD_ASSERT (s != NULL);
11249 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11250 copy the initial value out of the dynamic object and into the
11251 runtime process image. We need to remember the offset into the
11252 .rel(a).bss section we are going to use. */
11253 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11257 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11258 BFD_ASSERT (srel != NULL);
11259 srel->size += RELOC_SIZE (globals);
11263 return _bfd_elf_adjust_dynamic_copy (h, s);
11266 /* Allocate space in .plt, .got and associated reloc sections for
11270 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11272 struct bfd_link_info *info;
11273 struct elf32_arm_link_hash_table *htab;
11274 struct elf32_arm_link_hash_entry *eh;
11275 struct elf32_arm_relocs_copied *p;
11276 bfd_signed_vma thumb_refs;
11278 eh = (struct elf32_arm_link_hash_entry *) h;
11280 if (h->root.type == bfd_link_hash_indirect)
11283 if (h->root.type == bfd_link_hash_warning)
11284 /* When warning symbols are created, they **replace** the "real"
11285 entry in the hash table, thus we never get to see the real
11286 symbol in a hash traversal. So look at it now. */
11287 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11289 info = (struct bfd_link_info *) inf;
11290 htab = elf32_arm_hash_table (info);
11292 if (htab->root.dynamic_sections_created
11293 && h->plt.refcount > 0)
11295 /* Make sure this symbol is output as a dynamic symbol.
11296 Undefined weak syms won't yet be marked as dynamic. */
11297 if (h->dynindx == -1
11298 && !h->forced_local)
11300 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11305 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11307 asection *s = htab->splt;
11309 /* If this is the first .plt entry, make room for the special
11312 s->size += htab->plt_header_size;
11314 h->plt.offset = s->size;
11316 /* If we will insert a Thumb trampoline before this PLT, leave room
11318 thumb_refs = eh->plt_thumb_refcount;
11319 if (!htab->use_blx)
11320 thumb_refs += eh->plt_maybe_thumb_refcount;
11322 if (thumb_refs > 0)
11324 h->plt.offset += PLT_THUMB_STUB_SIZE;
11325 s->size += PLT_THUMB_STUB_SIZE;
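/* (The stub lets Thumb callers on targets without BLX switch into ARM
state before falling through to the ARM PLT entry; it sits immediately
before that entry, which is why h->plt.offset was bumped past it
above.)  */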
11328 /* If this symbol is not defined in a regular file, and we are
11329 not generating a shared library, then set the symbol to this
11330 location in the .plt. This is required to make function
11331 pointers compare as equal between the normal executable and
11332 the shared library. */
11334 && !h->def_regular)
11336 h->root.u.def.section = s;
11337 h->root.u.def.value = h->plt.offset;
11340 /* Make sure the function is not marked as Thumb, in case
11341 it is the target of an ABS32 relocation, which will
11342 point to the PLT entry. */
11343 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11344 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11346 /* Make room for this entry. */
11347 s->size += htab->plt_entry_size;
11349 if (!htab->symbian_p)
11351 /* We also need to make an entry in the .got.plt section, which
11352 will be placed in the .got section by the linker script. */
11353 eh->plt_got_offset = htab->sgotplt->size;
11354 htab->sgotplt->size += 4;
11357 /* We also need to make an entry in the .rel(a).plt section. */
11358 htab->srelplt->size += RELOC_SIZE (htab);
11360 /* VxWorks executables have a second set of relocations for
11361 each PLT entry. They go in a separate relocation section,
11362 which is processed by the kernel loader. */
11363 if (htab->vxworks_p && !info->shared)
11365 /* There is a relocation for the initial PLT entry:
11366 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11367 if (h->plt.offset == htab->plt_header_size)
11368 htab->srelplt2->size += RELOC_SIZE (htab);
11370 /* There are two extra relocations for each subsequent
11371 PLT entry: an R_ARM_32 relocation for the GOT entry,
11372 and an R_ARM_32 relocation for the PLT entry. */
11373 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11378 h->plt.offset = (bfd_vma) -1;
11384 h->plt.offset = (bfd_vma) -1;
11388 if (h->got.refcount > 0)
11392 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11395 /* Make sure this symbol is output as a dynamic symbol.
11396 Undefined weak syms won't yet be marked as dynamic. */
11397 if (h->dynindx == -1
11398 && !h->forced_local)
11400 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11404 if (!htab->symbian_p)
11407 h->got.offset = s->size;
11409 if (tls_type == GOT_UNKNOWN)
11412 if (tls_type == GOT_NORMAL)
11413 /* Non-TLS symbols need one GOT slot. */
11417 if (tls_type & GOT_TLS_GD)
11418 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11420 if (tls_type & GOT_TLS_IE)
11421 /* R_ARM_TLS_IE32 needs one GOT slot. */
11425 dyn = htab->root.dynamic_sections_created;
11428 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11430 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11433 if (tls_type != GOT_NORMAL
11434 && (info->shared || indx != 0)
11435 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11436 || h->root.type != bfd_link_hash_undefweak))
11438 if (tls_type & GOT_TLS_IE)
11439 htab->srelgot->size += RELOC_SIZE (htab);
11441 if (tls_type & GOT_TLS_GD)
11442 htab->srelgot->size += RELOC_SIZE (htab);
11444 if ((tls_type & GOT_TLS_GD) && indx != 0)
11445 htab->srelgot->size += RELOC_SIZE (htab);
11447 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11448 || h->root.type != bfd_link_hash_undefweak)
11450 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11451 htab->srelgot->size += RELOC_SIZE (htab);
11455 h->got.offset = (bfd_vma) -1;
11457 /* Allocate stubs for exported Thumb functions on v4t. */
11458 if (!htab->use_blx && h->dynindx != -1
11460 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11461 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11463 struct elf_link_hash_entry * th;
11464 struct bfd_link_hash_entry * bh;
11465 struct elf_link_hash_entry * myh;
11469 /* Create a new symbol to register the real location of the function. */
11470 s = h->root.u.def.section;
11471 sprintf (name, "__real_%s", h->root.root.string);
11472 _bfd_generic_link_add_one_symbol (info, s->owner,
11473 name, BSF_GLOBAL, s,
11474 h->root.u.def.value,
11475 NULL, TRUE, FALSE, &bh);
11477 myh = (struct elf_link_hash_entry *) bh;
11478 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11479 myh->forced_local = 1;
11480 eh->export_glue = myh;
11481 th = record_arm_to_thumb_glue (info, h);
11482 /* Point the symbol at the stub. */
11483 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11484 h->root.u.def.section = th->root.u.def.section;
11485 h->root.u.def.value = th->root.u.def.value & ~1;
11488 if (eh->relocs_copied == NULL)
11491 /* In the shared -Bsymbolic case, discard space allocated for
11492 dynamic pc-relative relocs against symbols which turn out to be
11493 defined in regular objects. For the normal shared case, discard
11494 space for pc-relative relocs that have become local due to symbol
11495 visibility changes. */
11497 if (info->shared || htab->root.is_relocatable_executable)
11499 /* The only relocs that use pc_count are R_ARM_REL32 and
11500 R_ARM_REL32_NOI, which will appear on something like
11501 ".long foo - .". We want calls to protected symbols to resolve
11502 directly to the function rather than going via the plt. If people
11503 want function pointer comparisons to work as expected then they
11504 should avoid writing assembly like ".long foo - .". */
11505 if (SYMBOL_CALLS_LOCAL (info, h))
11507 struct elf32_arm_relocs_copied **pp;
11509 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11511 p->count -= p->pc_count;
11520 if (elf32_arm_hash_table (info)->vxworks_p)
11522 struct elf32_arm_relocs_copied **pp;
11524 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11526 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11533 /* Also discard relocs on undefined weak syms with non-default
11535 if (eh->relocs_copied != NULL
11536 && h->root.type == bfd_link_hash_undefweak)
11538 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11539 eh->relocs_copied = NULL;
11541 /* Make sure undefined weak symbols are output as a dynamic
11543 else if (h->dynindx == -1
11544 && !h->forced_local)
11546 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11551 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11552 && h->root.type == bfd_link_hash_new)
11554 /* Output absolute symbols so that we can create relocations
11555 against them. For normal symbols we output a relocation
11556 against the section that contains them. */
11557 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11564 /* For the non-shared case, discard space for relocs against
11565 symbols which turn out to need copy relocs or are not
11568 if (!h->non_got_ref
11569 && ((h->def_dynamic
11570 && !h->def_regular)
11571 || (htab->root.dynamic_sections_created
11572 && (h->root.type == bfd_link_hash_undefweak
11573 || h->root.type == bfd_link_hash_undefined))))
11575 /* Make sure this symbol is output as a dynamic symbol.
11576 Undefined weak syms won't yet be marked as dynamic. */
11577 if (h->dynindx == -1
11578 && !h->forced_local)
11580 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11584 /* If that succeeded, we know we'll be keeping all the
11586 if (h->dynindx != -1)
11590 eh->relocs_copied = NULL;
11595 /* Finally, allocate space. */
11596 for (p = eh->relocs_copied; p != NULL; p = p->next)
11598 asection *sreloc = elf_section_data (p->section)->sreloc;
11599 sreloc->size += p->count * RELOC_SIZE (htab);
11605 /* Find any dynamic relocs that apply to read-only sections. */
11608 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11610 struct elf32_arm_link_hash_entry * eh;
11611 struct elf32_arm_relocs_copied * p;
11613 if (h->root.type == bfd_link_hash_warning)
11614 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11616 eh = (struct elf32_arm_link_hash_entry *) h;
11617 for (p = eh->relocs_copied; p != NULL; p = p->next)
11619 asection *s = p->section;
11621 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11623 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11625 info->flags |= DF_TEXTREL;
11627 /* Not an error, just cut short the traversal. */
11635 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11638 struct elf32_arm_link_hash_table *globals;
11640 globals = elf32_arm_hash_table (info);
11641 globals->byteswap_code = byteswap_code;
11644 /* Set the sizes of the dynamic sections. */
11647 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11648 struct bfd_link_info * info)
11653 bfd_boolean relocs;
11655 struct elf32_arm_link_hash_table *htab;
11657 htab = elf32_arm_hash_table (info);
11658 dynobj = elf_hash_table (info)->dynobj;
11659 BFD_ASSERT (dynobj != NULL);
11660 check_use_blx (htab);
11662 if (elf_hash_table (info)->dynamic_sections_created)
11664 /* Set the contents of the .interp section to the interpreter. */
11665 if (info->executable)
11667 s = bfd_get_section_by_name (dynobj, ".interp");
11668 BFD_ASSERT (s != NULL);
11669 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11670 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11674 /* Set up .got offsets for local syms, and space for local dynamic
11676 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11678 bfd_signed_vma *local_got;
11679 bfd_signed_vma *end_local_got;
11680 char *local_tls_type;
11681 bfd_size_type locsymcount;
11682 Elf_Internal_Shdr *symtab_hdr;
11684 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11686 if (! is_arm_elf (ibfd))
11689 for (s = ibfd->sections; s != NULL; s = s->next)
11691 struct elf32_arm_relocs_copied *p;
11693 for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11695 if (!bfd_is_abs_section (p->section)
11696 && bfd_is_abs_section (p->section->output_section))
11698 /* Input section has been discarded, either because
11699 it is a copy of a linkonce section or due to
11700 linker script /DISCARD/, so we'll be discarding
11703 else if (is_vxworks
11704 && strcmp (p->section->output_section->name,
11707 /* Relocations in vxworks .tls_vars sections are
11708 handled specially by the loader. */
11710 else if (p->count != 0)
11712 srel = elf_section_data (p->section)->sreloc;
11713 srel->size += p->count * RELOC_SIZE (htab);
11714 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11715 info->flags |= DF_TEXTREL;
11720 local_got = elf_local_got_refcounts (ibfd);
11724 symtab_hdr = & elf_symtab_hdr (ibfd);
11725 locsymcount = symtab_hdr->sh_info;
11726 end_local_got = local_got + locsymcount;
11727 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11729 srel = htab->srelgot;
11730 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11732 if (*local_got > 0)
11734 *local_got = s->size;
11735 if (*local_tls_type & GOT_TLS_GD)
11736 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11738 if (*local_tls_type & GOT_TLS_IE)
11740 if (*local_tls_type == GOT_NORMAL)
11743 if (info->shared || *local_tls_type == GOT_TLS_GD)
11744 srel->size += RELOC_SIZE (htab);
11747 *local_got = (bfd_vma) -1;
11751 if (htab->tls_ldm_got.refcount > 0)
11753 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11754 for R_ARM_TLS_LDM32 relocations. */
11755 htab->tls_ldm_got.offset = htab->sgot->size;
11756 htab->sgot->size += 8;
11758 htab->srelgot->size += RELOC_SIZE (htab);
11761 htab->tls_ldm_got.offset = -1;
11763 /* Allocate global sym .plt and .got entries, and space for global
11764 sym dynamic relocs. */
11765 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11767 /* Here we rummage through the found bfds to collect glue information. */
11768 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11770 if (! is_arm_elf (ibfd))
11773 /* Initialise mapping tables for code/data. */
11774 bfd_elf32_arm_init_maps (ibfd);
11776 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11777 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11778 /* xgettext:c-format */
11779 _bfd_error_handler (_("Errors encountered processing file %s"),
11783 /* Allocate space for the glue sections now that we've sized them. */
11784 bfd_elf32_arm_allocate_interworking_sections (info);
11786 /* The check_relocs and adjust_dynamic_symbol entry points have
11787 determined the sizes of the various dynamic sections. Allocate
11788 memory for them. */
11791 for (s = dynobj->sections; s != NULL; s = s->next)
11795 if ((s->flags & SEC_LINKER_CREATED) == 0)
11798 /* It's OK to base decisions on the section name, because none
11799 of the dynobj section names depend upon the input files. */
11800 name = bfd_get_section_name (dynobj, s);
11802 if (strcmp (name, ".plt") == 0)
11804 /* Remember whether there is a PLT. */
11805 plt = s->size != 0;
11807 else if (CONST_STRNEQ (name, ".rel"))
11811 /* Remember whether there are any reloc sections other
11812 than .rel(a).plt and .rela.plt.unloaded. */
11813 if (s != htab->srelplt && s != htab->srelplt2)
11816 /* We use the reloc_count field as a counter if we need
11817 to copy relocs into the output file. */
11818 s->reloc_count = 0;
11821 else if (! CONST_STRNEQ (name, ".got")
11822 && strcmp (name, ".dynbss") != 0)
11824 /* It's not one of our sections, so don't allocate space. */
11830 /* If we don't need this section, strip it from the
11831 output file. This is mostly to handle .rel(a).bss and
11832 .rel(a).plt. We must create both sections in
11833 create_dynamic_sections, because they must be created
11834 before the linker maps input sections to output
11835 sections. The linker does that before
11836 adjust_dynamic_symbol is called, and it is that
11837 function which decides whether anything needs to go
11838 into these sections. */
11839 s->flags |= SEC_EXCLUDE;
11843 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11846 /* Allocate memory for the section contents. */
11847 s->contents = bfd_zalloc (dynobj, s->size);
11848 if (s->contents == NULL)
11852 if (elf_hash_table (info)->dynamic_sections_created)
11854 /* Add some entries to the .dynamic section. We fill in the
11855 values later, in elf32_arm_finish_dynamic_sections, but we
11856 must add the entries now so that we get the correct size for
11857 the .dynamic section. The DT_DEBUG entry is filled in by the
11858 dynamic linker and used by the debugger. */
11859 #define add_dynamic_entry(TAG, VAL) \
11860 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11862 if (info->executable)
11864 if (!add_dynamic_entry (DT_DEBUG, 0))
11870 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11871 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11872 || !add_dynamic_entry (DT_PLTREL,
11873 htab->use_rel ? DT_REL : DT_RELA)
11874 || !add_dynamic_entry (DT_JMPREL, 0))
11882 if (!add_dynamic_entry (DT_REL, 0)
11883 || !add_dynamic_entry (DT_RELSZ, 0)
11884 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11889 if (!add_dynamic_entry (DT_RELA, 0)
11890 || !add_dynamic_entry (DT_RELASZ, 0)
11891 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11896 /* If any dynamic relocs apply to a read-only section,
11897 then we need a DT_TEXTREL entry. */
11898 if ((info->flags & DF_TEXTREL) == 0)
11899 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11902 if ((info->flags & DF_TEXTREL) != 0)
11904 if (!add_dynamic_entry (DT_TEXTREL, 0))
11907 if (htab->vxworks_p
11908 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11911 #undef add_dynamic_entry
11916 /* Finish up dynamic symbol handling. We set the contents of various
11917 dynamic sections here. */
11920 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11921 struct bfd_link_info * info,
11922 struct elf_link_hash_entry * h,
11923 Elf_Internal_Sym * sym)
11926 struct elf32_arm_link_hash_table *htab;
11927 struct elf32_arm_link_hash_entry *eh;
11929 dynobj = elf_hash_table (info)->dynobj;
11930 htab = elf32_arm_hash_table (info);
11931 eh = (struct elf32_arm_link_hash_entry *) h;
11933 if (h->plt.offset != (bfd_vma) -1)
11939 Elf_Internal_Rela rel;
11941 /* This symbol has an entry in the procedure linkage table. Set
11944 BFD_ASSERT (h->dynindx != -1);
11946 splt = bfd_get_section_by_name (dynobj, ".plt");
11947 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11948 BFD_ASSERT (splt != NULL && srel != NULL);
11950 /* Fill in the entry in the procedure linkage table. */
11951 if (htab->symbian_p)
11953 put_arm_insn (htab, output_bfd,
11954 elf32_arm_symbian_plt_entry[0],
11955 splt->contents + h->plt.offset);
11956 bfd_put_32 (output_bfd,
11957 elf32_arm_symbian_plt_entry[1],
11958 splt->contents + h->plt.offset + 4);
11960 /* Fill in the entry in the .rel.plt section. */
11961 rel.r_offset = (splt->output_section->vma
11962 + splt->output_offset
11963 + h->plt.offset + 4);
11964 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11966 /* Get the index in the procedure linkage table which
11967 corresponds to this symbol. This is the index of this symbol
11968 in all the symbols for which we are making plt entries. The
11969 first entry in the procedure linkage table is reserved. */
11970 plt_index = ((h->plt.offset - htab->plt_header_size)
11971 / htab->plt_entry_size);
11975 bfd_vma got_offset, got_address, plt_address;
11976 bfd_vma got_displacement;
11980 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
11981 BFD_ASSERT (sgot != NULL);
11983 /* Get the offset into the .got.plt table of the entry that
11984 corresponds to this function. */
11985 got_offset = eh->plt_got_offset;
11987 /* Get the index in the procedure linkage table which
11988 corresponds to this symbol. This is the index of this symbol
11989 in all the symbols for which we are making plt entries. The
11990 first three entries in .got.plt are reserved; after that
11991 symbols appear in the same order as in .plt. */
11992 plt_index = (got_offset - 12) / 4;
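/* For example, the first PLT symbol's slot sits at got_offset 12, just
past the three reserved words, giving plt_index 0; the next is at 16,
giving plt_index 1, and so on.  */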
11994 /* Calculate the address of the GOT entry. */
11995 got_address = (sgot->output_section->vma
11996 + sgot->output_offset
11999 /* ...and the address of the PLT entry. */
12000 plt_address = (splt->output_section->vma
12001 + splt->output_offset
12004 ptr = htab->splt->contents + h->plt.offset;
12005 if (htab->vxworks_p && info->shared)
12010 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12012 val = elf32_arm_vxworks_shared_plt_entry[i];
12014 val |= got_address - sgot->output_section->vma;
12016 val |= plt_index * RELOC_SIZE (htab);
12017 if (i == 2 || i == 5)
12018 bfd_put_32 (output_bfd, val, ptr);
12020 put_arm_insn (htab, output_bfd, val, ptr);
12023 else if (htab->vxworks_p)
12028 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12030 val = elf32_arm_vxworks_exec_plt_entry[i];
12032 val |= got_address;
12034 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
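/* -((h->plt.offset + i * 4 + 8) >> 2) is a backwards offset, in words,
from this instruction (whose pc reads as its own address plus 8) to
the start of .plt, i.e. a branch back to the PLT header.  */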
12036 val |= plt_index * RELOC_SIZE (htab);
12037 if (i == 2 || i == 5)
12038 bfd_put_32 (output_bfd, val, ptr);
12040 put_arm_insn (htab, output_bfd, val, ptr);
12043 loc = (htab->srelplt2->contents
12044 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12046 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12047 referencing the GOT for this PLT entry. */
12048 rel.r_offset = plt_address + 8;
12049 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12050 rel.r_addend = got_offset;
12051 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12052 loc += RELOC_SIZE (htab);
12054 /* Create the R_ARM_ABS32 relocation referencing the
12055 beginning of the PLT for this GOT entry. */
12056 rel.r_offset = got_address;
12057 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12059 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12063 bfd_signed_vma thumb_refs;
12064 /* Calculate the displacement between the PLT slot and the
12065 entry in the GOT. The eight-byte offset accounts for the
12066 value produced by adding to pc in the first instruction
12067 of the PLT stub. */
12068 got_displacement = got_address - (plt_address + 8);
12070 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
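/* Only 28 bits are available: the displacement is split into 8-, 8- and
12-bit immediates by the three PLT instructions written below
(typically add ip, pc, #hi; add ip, ip, #mid; ldr pc, [ip, #lo]!).  */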
12072 thumb_refs = eh->plt_thumb_refcount;
12073 if (!htab->use_blx)
12074 thumb_refs += eh->plt_maybe_thumb_refcount;
12076 if (thumb_refs > 0)
12078 put_thumb_insn (htab, output_bfd,
12079 elf32_arm_plt_thumb_stub[0], ptr - 4);
12080 put_thumb_insn (htab, output_bfd,
12081 elf32_arm_plt_thumb_stub[1], ptr - 2);
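/* The stub is conventionally 'bx pc' followed by a Thumb nop, switching
to ARM state and falling through into the ARM PLT entry written
below.  */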
12084 put_arm_insn (htab, output_bfd,
12085 elf32_arm_plt_entry[0]
12086 | ((got_displacement & 0x0ff00000) >> 20),
12088 put_arm_insn (htab, output_bfd,
12089 elf32_arm_plt_entry[1]
12090 | ((got_displacement & 0x000ff000) >> 12),
12092 put_arm_insn (htab, output_bfd,
12093 elf32_arm_plt_entry[2]
12094 | (got_displacement & 0x00000fff),
12096 #ifdef FOUR_WORD_PLT
12097 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12101 /* Fill in the entry in the global offset table. */
12102 bfd_put_32 (output_bfd,
12103 (splt->output_section->vma
12104 + splt->output_offset),
12105 sgot->contents + got_offset);
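/* The slot initially points back at the start of .plt, so the first call
to this symbol goes through the lazy-resolution code in PLT entry
zero.  */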
12107 /* Fill in the entry in the .rel(a).plt section. */
12109 rel.r_offset = got_address;
12110 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12113 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12114 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12116 if (!h->def_regular)
12118 /* Mark the symbol as undefined, rather than as defined in
12119 the .plt section. Leave the value alone. */
12120 sym->st_shndx = SHN_UNDEF;
12121 /* If the symbol is weak, we do need to clear the value.
12122 Otherwise, the PLT entry would provide a definition for
12123 the symbol even if the symbol wasn't defined anywhere,
12124 and so the symbol would never be NULL. */
12125 if (!h->ref_regular_nonweak)
12130 if (h->got.offset != (bfd_vma) -1
12131 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12132 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12136 Elf_Internal_Rela rel;
12140 /* This symbol has an entry in the global offset table. Set it
12142 sgot = bfd_get_section_by_name (dynobj, ".got");
12143 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12144 BFD_ASSERT (sgot != NULL && srel != NULL);
12146 offset = (h->got.offset & ~(bfd_vma) 1);
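/* Bit zero of h->got.offset is used as a flag recording whether
relocate_section has already initialised the GOT entry; mask it off to
recover the real offset.  */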
12148 rel.r_offset = (sgot->output_section->vma
12149 + sgot->output_offset
12152 /* If this is a static link, or it is a -Bsymbolic link and the
12153 symbol is defined locally or was forced to be local because
12154 of a version file, we just want to emit a RELATIVE reloc.
12155 The entry in the global offset table will already have been
12156 initialized in the relocate_section function. */
12158 && SYMBOL_REFERENCES_LOCAL (info, h))
12160 BFD_ASSERT ((h->got.offset & 1) != 0);
12161 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12162 if (!htab->use_rel)
12164 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12165 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12170 BFD_ASSERT ((h->got.offset & 1) == 0);
12171 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12172 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12175 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12176 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12182 Elf_Internal_Rela rel;
12185 /* This symbol needs a copy reloc. Set it up. */
12186 BFD_ASSERT (h->dynindx != -1
12187 && (h->root.type == bfd_link_hash_defined
12188 || h->root.type == bfd_link_hash_defweak));
12190 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12191 RELOC_SECTION (htab, ".bss"));
12192 BFD_ASSERT (s != NULL);
12195 rel.r_offset = (h->root.u.def.value
12196 + h->root.u.def.section->output_section->vma
12197 + h->root.u.def.section->output_offset);
12198 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12199 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12200 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12203 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12204 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12205 to the ".got" section. */
12206 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12207 || (!htab->vxworks_p && h == htab->root.hgot))
12208 sym->st_shndx = SHN_ABS;
12213 /* Finish up the dynamic sections. */
12216 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12222 dynobj = elf_hash_table (info)->dynobj;
12224 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12225 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12226 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12228 if (elf_hash_table (info)->dynamic_sections_created)
12231 Elf32_External_Dyn *dyncon, *dynconend;
12232 struct elf32_arm_link_hash_table *htab;
12234 htab = elf32_arm_hash_table (info);
12235 splt = bfd_get_section_by_name (dynobj, ".plt");
12236 BFD_ASSERT (splt != NULL && sdyn != NULL);
12238 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12239 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12241 for (; dyncon < dynconend; dyncon++)
12243 Elf_Internal_Dyn dyn;
12247 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12254 if (htab->vxworks_p
12255 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12256 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12261 goto get_vma_if_bpabi;
12264 goto get_vma_if_bpabi;
12267 goto get_vma_if_bpabi;
12269 name = ".gnu.version";
12270 goto get_vma_if_bpabi;
12272 name = ".gnu.version_d";
12273 goto get_vma_if_bpabi;
12275 name = ".gnu.version_r";
12276 goto get_vma_if_bpabi;
12282 name = RELOC_SECTION (htab, ".plt");
12284 s = bfd_get_section_by_name (output_bfd, name);
12285 BFD_ASSERT (s != NULL);
12286 if (!htab->symbian_p)
12287 dyn.d_un.d_ptr = s->vma;
12289 /* In the BPABI, tags in the PT_DYNAMIC section point
12290 at the file offset, not the memory address, for the
12291 convenience of the post linker. */
12292 dyn.d_un.d_ptr = s->filepos;
12293 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12297 if (htab->symbian_p)
12302 s = bfd_get_section_by_name (output_bfd,
12303 RELOC_SECTION (htab, ".plt"));
12304 BFD_ASSERT (s != NULL);
12305 dyn.d_un.d_val = s->size;
12306 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12311 if (!htab->symbian_p)
12313 /* My reading of the SVR4 ABI indicates that the
12314 procedure linkage table relocs (DT_JMPREL) should be
12315 included in the overall relocs (DT_REL). This is
12316 what Solaris does. However, UnixWare cannot handle
12317 that case. Therefore, we override the DT_RELSZ entry
12318 here to make it not include the JMPREL relocs. Since
12319 the linker script arranges for .rel(a).plt to follow all
12320 other relocation sections, we don't have to worry
12321 about changing the DT_REL entry. */
12322 s = bfd_get_section_by_name (output_bfd,
12323 RELOC_SECTION (htab, ".plt"));
12325 dyn.d_un.d_val -= s->size;
12326 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12329 /* Fall through. */
12333 /* In the BPABI, the DT_REL tag must point at the file
12334 offset, not the VMA, of the first relocation
12335 section. So, we use code similar to that in
12336 elflink.c, but do not check for SHF_ALLOC on the
12337 relocation section, since relocation sections are
12338 never allocated under the BPABI. The comments above
12339 about UnixWare notwithstanding, we include all of the
12340 relocations here. */
12341 if (htab->symbian_p)
12344 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12345 ? SHT_REL : SHT_RELA);
12346 dyn.d_un.d_val = 0;
12347 for (i = 1; i < elf_numsections (output_bfd); i++)
12349 Elf_Internal_Shdr *hdr
12350 = elf_elfsections (output_bfd)[i];
12351 if (hdr->sh_type == type)
12353 if (dyn.d_tag == DT_RELSZ
12354 || dyn.d_tag == DT_RELASZ)
12355 dyn.d_un.d_val += hdr->sh_size;
12356 else if ((ufile_ptr) hdr->sh_offset
12357 <= dyn.d_un.d_val - 1)
12358 dyn.d_un.d_val = hdr->sh_offset;
12361 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12365 /* Set the bottom bit of DT_INIT/FINI if the
12366 corresponding function is Thumb. */
12368 name = info->init_function;
12371 name = info->fini_function;
12373 /* If it wasn't set by elf_bfd_final_link
12374 then there is nothing to adjust. */
12375 if (dyn.d_un.d_val != 0)
12377 struct elf_link_hash_entry * eh;
12379 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12380 FALSE, FALSE, TRUE);
12382 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12384 dyn.d_un.d_val |= 1;
12385 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12392 /* Fill in the first entry in the procedure linkage table. */
12393 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12395 const bfd_vma *plt0_entry;
12396 bfd_vma got_address, plt_address, got_displacement;
12398 /* Calculate the addresses of the GOT and PLT. */
12399 got_address = sgot->output_section->vma + sgot->output_offset;
12400 plt_address = splt->output_section->vma + splt->output_offset;
12402 if (htab->vxworks_p)
12404 /* The VxWorks GOT is relocated by the dynamic linker.
12405 Therefore, we must emit relocations rather than simply
12406 computing the values now. */
12407 Elf_Internal_Rela rel;
12409 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12410 put_arm_insn (htab, output_bfd, plt0_entry[0],
12411 splt->contents + 0);
12412 put_arm_insn (htab, output_bfd, plt0_entry[1],
12413 splt->contents + 4);
12414 put_arm_insn (htab, output_bfd, plt0_entry[2],
12415 splt->contents + 8);
12416 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12418 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12419 rel.r_offset = plt_address + 12;
12420 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12422 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12423 htab->srelplt2->contents);
12427 got_displacement = got_address - (plt_address + 16);
12429 plt0_entry = elf32_arm_plt0_entry;
12430 put_arm_insn (htab, output_bfd, plt0_entry[0],
12431 splt->contents + 0);
12432 put_arm_insn (htab, output_bfd, plt0_entry[1],
12433 splt->contents + 4);
12434 put_arm_insn (htab, output_bfd, plt0_entry[2],
12435 splt->contents + 8);
12436 put_arm_insn (htab, output_bfd, plt0_entry[3],
12437 splt->contents + 12);
12439 #ifdef FOUR_WORD_PLT
12440 /* The displacement value goes in the otherwise-unused
12441 last word of the second entry. */
12442 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12444 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12449 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12450 really seem like the right value. */
12451 if (splt->output_section->owner == output_bfd)
12452 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12454 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12456 /* Correct the .rel(a).plt.unloaded relocations. They will have
12457 incorrect symbol indexes. */
12461 num_plts = ((htab->splt->size - htab->plt_header_size)
12462 / htab->plt_entry_size);
12463 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12465 for (; num_plts; num_plts--)
12467 Elf_Internal_Rela rel;
12469 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12470 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12471 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12472 p += RELOC_SIZE (htab);
12474 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12475 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12476 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12477 p += RELOC_SIZE (htab);
12482 /* Fill in the first three entries in the global offset table. */
12485 if (sgot->size > 0)
12488 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12490 bfd_put_32 (output_bfd,
12491 sdyn->output_section->vma + sdyn->output_offset,
12493 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12494 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
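/* By convention GOT[0] holds the address of the .dynamic section (or
zero when there is none), and GOT[1] and GOT[2] are reserved for the
dynamic linker's own use.  */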
12497 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12504 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12506 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12507 struct elf32_arm_link_hash_table *globals;
12509 i_ehdrp = elf_elfheader (abfd);
12511 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12512 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12514 i_ehdrp->e_ident[EI_OSABI] = 0;
12515 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12519 globals = elf32_arm_hash_table (link_info);
12520 if (globals->byteswap_code)
12521 i_ehdrp->e_flags |= EF_ARM_BE8;
12525 static enum elf_reloc_type_class
12526 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12528 switch ((int) ELF32_R_TYPE (rela->r_info))
12530 case R_ARM_RELATIVE:
12531 return reloc_class_relative;
12532 case R_ARM_JUMP_SLOT:
12533 return reloc_class_plt;
12535 return reloc_class_copy;
12537 return reloc_class_normal;
12541 /* Set the right machine number for an Arm ELF file. */
12544 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12546 if (hdr->sh_type == SHT_NOTE)
12547 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12553 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12555 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12558 /* Return TRUE if this is an unwinding table entry. */
12561 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12563 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12564 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12568 /* Set the type and flags for an ARM section. We do this by
12569 the section name, which is a hack, but ought to work. */
12572 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12576 name = bfd_get_section_name (abfd, sec);
12578 if (is_arm_elf_unwind_section_name (abfd, name))
12580 hdr->sh_type = SHT_ARM_EXIDX;
12581 hdr->sh_flags |= SHF_LINK_ORDER;
12586 /* Handle an ARM specific section when reading an object file. This is
12587 called when bfd_section_from_shdr finds a section with an unknown
12591 elf32_arm_section_from_shdr (bfd *abfd,
12592 Elf_Internal_Shdr * hdr,
12596 /* There ought to be a place to keep ELF backend specific flags, but
12597 at the moment there isn't one. We just keep track of the
12598 sections by their name, instead. Fortunately, the ABI gives
12599 names for all the ARM specific sections, so we will probably get
12601 switch (hdr->sh_type)
12603 case SHT_ARM_EXIDX:
12604 case SHT_ARM_PREEMPTMAP:
12605 case SHT_ARM_ATTRIBUTES:
12612 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12618 /* A structure used to record a list of sections, independently
12619 of the next and prev fields in the asection structure. */
12620 typedef struct section_list
12623 struct section_list * next;
12624 struct section_list * prev;
12628 /* Unfortunately we need to keep a list of sections for which
12629 an _arm_elf_section_data structure has been allocated. This
12630 is because it is possible for functions like elf32_arm_write_section
12631 to be called on a section which has had an elf_section_data structure
12632 allocated for it (and so the used_by_bfd field is valid) but
12633 for which the ARM extended version of this structure - the
12634 _arm_elf_section_data structure - has not been allocated. */
12635 static section_list * sections_with_arm_elf_section_data = NULL;
12638 record_section_with_arm_elf_section_data (asection * sec)
12640 struct section_list * entry;
12642 entry = bfd_malloc (sizeof (* entry));
12646 entry->next = sections_with_arm_elf_section_data;
12647 entry->prev = NULL;
12648 if (entry->next != NULL)
12649 entry->next->prev = entry;
12650 sections_with_arm_elf_section_data = entry;
12653 static struct section_list *
12654 find_arm_elf_section_entry (asection * sec)
12656 struct section_list * entry;
12657 static struct section_list * last_entry = NULL;
12659 /* This is a short cut for the typical case where the sections are added
12660 to the sections_with_arm_elf_section_data list in forward order and
12661 then looked up here in backwards order. This makes a real difference
12662 to the ld-srec/sec64k.exp linker test. */
12663 entry = sections_with_arm_elf_section_data;
12664 if (last_entry != NULL)
12666 if (last_entry->sec == sec)
12667 entry = last_entry;
12668 else if (last_entry->next != NULL
12669 && last_entry->next->sec == sec)
12670 entry = last_entry->next;
12673 for (; entry; entry = entry->next)
12674 if (entry->sec == sec)
12678 /* Record the entry prior to this one - it is the entry we are most
12679 likely to want to locate next time. Also this way if we have been
12680 called from unrecord_section_with_arm_elf_section_data() we will not
12681 be caching a pointer that is about to be freed. */
12682 last_entry = entry->prev;
12687 static _arm_elf_section_data *
12688 get_arm_elf_section_data (asection * sec)
12690 struct section_list * entry;
12692 entry = find_arm_elf_section_entry (sec);
12695 return elf32_arm_section_data (entry->sec);
12701 unrecord_section_with_arm_elf_section_data (asection * sec)
12703 struct section_list * entry;
12705 entry = find_arm_elf_section_entry (sec);
12709 if (entry->prev != NULL)
12710 entry->prev->next = entry->next;
12711 if (entry->next != NULL)
12712 entry->next->prev = entry->prev;
12713 if (entry == sections_with_arm_elf_section_data)
12714 sections_with_arm_elf_section_data = entry->next;
12723 struct bfd_link_info *info;
12726 int (*func) (void *, const char *, Elf_Internal_Sym *,
12727 asection *, struct elf_link_hash_entry *);
12728 } output_arch_syminfo;
12730 enum map_symbol_type
12738 /* Output a single mapping symbol. */
12741 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12742 enum map_symbol_type type,
12745 static const char *names[3] = {"$a", "$t", "$d"};
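/* $a, $t and $d mark the start of ARM code, Thumb code and literal data
respectively, per the ARM ELF mapping symbol convention; TYPE indexes
this table.  */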
12746 struct elf32_arm_link_hash_table *htab;
12747 Elf_Internal_Sym sym;
12749 htab = elf32_arm_hash_table (osi->info);
12750 sym.st_value = osi->sec->output_section->vma
12751 + osi->sec->output_offset
12755 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12756 sym.st_shndx = osi->sec_shndx;
12757 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12761 /* Output mapping symbols for PLT entries associated with H. */
12764 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12766 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12767 struct elf32_arm_link_hash_table *htab;
12768 struct elf32_arm_link_hash_entry *eh;
12771 htab = elf32_arm_hash_table (osi->info);
12773 if (h->root.type == bfd_link_hash_indirect)
12776 if (h->root.type == bfd_link_hash_warning)
12777 /* When warning symbols are created, they **replace** the "real"
12778 entry in the hash table, thus we never get to see the real
12779 symbol in a hash traversal. So look at it now. */
12780 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12782 if (h->plt.offset == (bfd_vma) -1)
12785 eh = (struct elf32_arm_link_hash_entry *) h;
12786 addr = h->plt.offset;
12787 if (htab->symbian_p)
12789 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12791 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12794 else if (htab->vxworks_p)
12796 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12798 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12800 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12802 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12807 bfd_signed_vma thumb_refs;
12809 thumb_refs = eh->plt_thumb_refcount;
12810 if (!htab->use_blx)
12811 thumb_refs += eh->plt_maybe_thumb_refcount;
12813 if (thumb_refs > 0)
12815 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12818 #ifdef FOUR_WORD_PLT
12819 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12821 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12824 /* A three-word PLT with no Thumb thunk contains only ARM code,
12825 so we only need to output a mapping symbol for the first PLT entry and
12826 for entries with Thumb thunks. */
12827 if (thumb_refs > 0 || addr == 20)
12829 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12838 /* Output a single local symbol for a generated stub. */
12841 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12842 bfd_vma offset, bfd_vma size)
12844 struct elf32_arm_link_hash_table *htab;
12845 Elf_Internal_Sym sym;
12847 htab = elf32_arm_hash_table (osi->info);
12848 sym.st_value = osi->sec->output_section->vma
12849 + osi->sec->output_offset
12851 sym.st_size = size;
12853 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12854 sym.st_shndx = osi->sec_shndx;
12855 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12859 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12862 struct elf32_arm_stub_hash_entry *stub_entry;
12863 struct bfd_link_info *info;
12864 struct elf32_arm_link_hash_table *htab;
12865 asection *stub_sec;
12868 output_arch_syminfo *osi;
12869 const insn_sequence *template;
12870 enum stub_insn_type prev_type;
12873 enum map_symbol_type sym_type;
12875 /* Massage our args to the form they really have. */
12876 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12877 osi = (output_arch_syminfo *) in_arg;
12881 htab = elf32_arm_hash_table (info);
12882 stub_sec = stub_entry->stub_sec;
12884 /* Ensure this stub is attached to the current section being
12886 if (stub_sec != osi->sec)
12889 addr = (bfd_vma) stub_entry->stub_offset;
12890 stub_name = stub_entry->output_name;
12892 template = stub_entry->stub_template;
12893 switch (template[0].type)
12896 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12901 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12902 stub_entry->stub_size))
12910 prev_type = DATA_TYPE;
12912 for (i = 0; i < stub_entry->stub_template_size; i++)
12914 switch (template[i].type)
12917 sym_type = ARM_MAP_ARM;
12922 sym_type = ARM_MAP_THUMB;
12926 sym_type = ARM_MAP_DATA;
12934 if (template[i].type != prev_type)
12936 prev_type = template[i].type;
12937 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12941 switch (template[i].type)
12965 /* Output mapping symbols for linker generated sections. */
12968 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12969 struct bfd_link_info *info,
12971 int (*func) (void *, const char *,
12972 Elf_Internal_Sym *,
12974 struct elf_link_hash_entry *))
12976 output_arch_syminfo osi;
12977 struct elf32_arm_link_hash_table *htab;
12979 bfd_size_type size;
12981 htab = elf32_arm_hash_table (info);
12982 check_use_blx (htab);
12988 /* ARM->Thumb glue. */
12989 if (htab->arm_glue_size > 0)
12991 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12992 ARM2THUMB_GLUE_SECTION_NAME);
12994 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12995 (output_bfd, osi.sec->output_section);
12996 if (info->shared || htab->root.is_relocatable_executable
12997 || htab->pic_veneer)
12998 size = ARM2THUMB_PIC_GLUE_SIZE;
12999 else if (htab->use_blx)
13000 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13002 size = ARM2THUMB_STATIC_GLUE_SIZE;
13004 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13006 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13007 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13011 /* Thumb->ARM glue. */
13012 if (htab->thumb_glue_size > 0)
13014 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13015 THUMB2ARM_GLUE_SECTION_NAME);
13017 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13018 (output_bfd, osi.sec->output_section);
13019 size = THUMB2ARM_GLUE_SIZE;
13021 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13023 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13024 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13028 /* ARMv4 BX veneers. */
13029 if (htab->bx_glue_size > 0)
13031 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13032 ARM_BX_GLUE_SECTION_NAME);
13034 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13035 (output_bfd, osi.sec->output_section);
13037 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13040 /* Long calls stubs. */
13041 if (htab->stub_bfd && htab->stub_bfd->sections)
13043 asection* stub_sec;
13045 for (stub_sec = htab->stub_bfd->sections;
13047 stub_sec = stub_sec->next)
13049 /* Ignore non-stub sections. */
13050 if (!strstr (stub_sec->name, STUB_SUFFIX))
13053 osi.sec = stub_sec;
13055 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13056 (output_bfd, osi.sec->output_section);
13058 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13062 /* Finally, output mapping symbols for the PLT. */
13063 if (!htab->splt || htab->splt->size == 0)
13066 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13067 htab->splt->output_section);
13068 osi.sec = htab->splt;
13069 /* Output mapping symbols for the plt header. SymbianOS does not have a
13071 if (htab->vxworks_p)
13073 /* VxWorks shared libraries have no PLT header. */
13076 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13078 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13082 else if (!htab->symbian_p)
13084 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13086 #ifndef FOUR_WORD_PLT
13087 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13092 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13096 /* Allocate target specific section data. */
13099 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13101 if (!sec->used_by_bfd)
13103 _arm_elf_section_data *sdata;
13104 bfd_size_type amt = sizeof (*sdata);
13106 sdata = bfd_zalloc (abfd, amt);
13109 sec->used_by_bfd = sdata;
13112 record_section_with_arm_elf_section_data (sec);
13114 return _bfd_elf_new_section_hook (abfd, sec);
13118 /* Used to order a list of mapping symbols by address. */
13121 elf32_arm_compare_mapping (const void * a, const void * b)
13123 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13124 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13126 if (amap->vma > bmap->vma)
13128 else if (amap->vma < bmap->vma)
13130 else if (amap->type > bmap->type)
13131 /* Ensure results do not depend on the host qsort for objects with
13132 multiple mapping symbols at the same address by sorting on type
13135 else if (amap->type < bmap->type)
13141 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
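/* (For example, offsetting 0x7ffffffe by 4 yields 0x00000002: the low 31
bits wrap while bit 31 is left untouched.)  */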
13143 static unsigned long
13144 offset_prel31 (unsigned long addr, bfd_vma offset)
13146 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13149 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13153 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13155 unsigned long first_word = bfd_get_32 (output_bfd, from);
13156 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13158 /* High bit of first word is supposed to be zero. */
13159 if ((first_word & 0x80000000ul) == 0)
13160 first_word = offset_prel31 (first_word, offset);
13162 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
13163 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13164 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13165 second_word = offset_prel31 (second_word, offset);
13167 bfd_put_32 (output_bfd, first_word, to);
13168 bfd_put_32 (output_bfd, second_word, to + 4);
13171 /* Data for make_branch_to_a8_stub(). */
13173 struct a8_branch_to_stub_data {
13174 asection *writing_section;
13175 bfd_byte *contents;
13179 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13180 places for a particular section. */
13183 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13186 struct elf32_arm_stub_hash_entry *stub_entry;
13187 struct a8_branch_to_stub_data *data;
13188 bfd_byte *contents;
13189 unsigned long branch_insn;
13190 bfd_vma veneered_insn_loc, veneer_entry_loc;
13191 bfd_signed_vma branch_offset;
13193 unsigned int index;
13195 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13196 data = (struct a8_branch_to_stub_data *) in_arg;
13198 if (stub_entry->target_section != data->writing_section
13199 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13202 contents = data->contents;
13204 veneered_insn_loc = stub_entry->target_section->output_section->vma
13205 + stub_entry->target_section->output_offset
13206 + stub_entry->target_value;
13208 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13209 + stub_entry->stub_sec->output_offset
13210 + stub_entry->stub_offset;
13212 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13213 veneered_insn_loc &= ~3u;
13215 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
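/* The Thumb PC reads as the address of the branch plus 4, hence the -4;
for BLX the source address was additionally aligned down above, since a
BLX offset is taken from Align(PC, 4).  */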
13217 abfd = stub_entry->target_section->owner;
13218 index = stub_entry->target_value;
13220 /* We attempt to avoid this condition by setting stubs_always_after_branch
13221 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13222 This check is just to be on the safe side... */
13223 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13225 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13226 "allocated in unsafe location"), abfd);
13230 switch (stub_entry->stub_type)
13232 case arm_stub_a8_veneer_b:
13233 case arm_stub_a8_veneer_b_cond:
13234 branch_insn = 0xf0009000;
13237 case arm_stub_a8_veneer_blx:
13238 branch_insn = 0xf000e800;
13241 case arm_stub_a8_veneer_bl:
13243 unsigned int i1, j1, i2, j2, s;
13245 branch_insn = 0xf000d000;
13248 if (branch_offset < -16777216 || branch_offset > 16777214)
13250 /* There's not much we can do apart from complain if this
13252 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13253 "of range (input file too large)"), abfd);
13257 /* i1 = not(j1 eor s), so:
13259 j1 = (not i1) eor s. */
13261 branch_insn |= (branch_offset >> 1) & 0x7ff;
13262 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13263 i2 = (branch_offset >> 22) & 1;
13264 i1 = (branch_offset >> 23) & 1;
13265 s = (branch_offset >> 24) & 1;
13268 branch_insn |= j2 << 11;
13269 branch_insn |= j1 << 13;
13270 branch_insn |= s << 26;
13279 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13280 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13285 /* Do code byteswapping. Return FALSE afterwards so that the section is
13286 written out as normal. */
13289 elf32_arm_write_section (bfd *output_bfd,
13290 struct bfd_link_info *link_info,
13292 bfd_byte *contents)
13294 unsigned int mapcount, errcount;
13295 _arm_elf_section_data *arm_data;
13296 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13297 elf32_arm_section_map *map;
13298 elf32_vfp11_erratum_list *errnode;
13301 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13305 /* If this section has not been allocated an _arm_elf_section_data
13306 structure then we cannot record anything. */
13307 arm_data = get_arm_elf_section_data (sec);
13308 if (arm_data == NULL)
13311 mapcount = arm_data->mapcount;
13312 map = arm_data->map;
13313 errcount = arm_data->erratumcount;
13317 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
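/* XORing a byte index with 3 mirrors it within a 32-bit word, so the
byte-wise patches below land on the right bytes when the output is
big-endian.  */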
13319 for (errnode = arm_data->erratumlist; errnode != 0;
13320 errnode = errnode->next)
13322 bfd_vma index = errnode->vma - offset;
13324 switch (errnode->type)
13326 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13328 bfd_vma branch_to_veneer;
13329 /* Original condition code of instruction, plus bit mask for
13330 ARM B instruction. */
13331 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13334 /* The instruction is before the label. */
13337 /* Above offset included in -4 below. */
13338 branch_to_veneer = errnode->u.b.veneer->vma
13339 - errnode->vma - 4;
13341 if ((signed) branch_to_veneer < -(1 << 25)
13342 || (signed) branch_to_veneer >= (1 << 25))
13343 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13344 "range"), output_bfd);
13346 insn |= (branch_to_veneer >> 2) & 0xffffff;
13347 contents[endianflip ^ index] = insn & 0xff;
13348 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13349 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13350 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
              }
              break;

            case VFP11_ERRATUM_ARM_VENEER:
              {
                bfd_vma branch_from_veneer;
                unsigned int insn;

                /* Take size of veneer into account.  */
                branch_from_veneer = errnode->u.v.branch->vma
                                     - errnode->vma - 12;

                if ((signed) branch_from_veneer < -(1 << 25)
                    || (signed) branch_from_veneer >= (1 << 25))
                  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
                                           "range"), output_bfd);

                /* Original instruction.  */
                insn = errnode->u.v.branch->u.b.vfp_insn;
                contents[endianflip ^ index] = insn & 0xff;
                contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;

                /* Branch back to insn after original insn.  */
                insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
                contents[endianflip ^ (index + 4)] = insn & 0xff;
                contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
                contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
                contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
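                /* The B written just above sits at errnode->vma + 4, and an
                   ARM branch is taken relative to PC = instruction address
                   + 8; together those account for the -12 applied to
                   branch_from_veneer above.  */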
              }
              break;

            default:
              abort ();
            }
        }
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
        = arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
         size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
         markers) was sec->rawsize.  (This isn't the case if we perform no
         edits; then rawsize will be zero and we should use size.)  */
      bfd_byte *edited_contents = bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
        {
          if (edit_node)
            {
              unsigned int edit_index = edit_node->index;

              if (in_index < edit_index && in_index * 8 < input_size)
                {
                  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
                                    contents + in_index * 8, add_to_offsets);
                  out_index++;
                  in_index++;
                }
              else if (in_index == edit_index
                       || (in_index * 8 >= input_size
                           && edit_index == UINT_MAX))
                {
                  switch (edit_node->type)
                    {
                    case DELETE_EXIDX_ENTRY:
                      in_index++;
                      add_to_offsets += 8;
                      break;

                    case INSERT_EXIDX_CANTUNWIND_AT_END:
                      {
                        asection *text_sec = edit_node->linked_section;
                        bfd_vma text_offset = text_sec->output_section->vma
                                              + text_sec->output_offset
                                              + text_sec->size;
                        bfd_vma exidx_offset = offset + out_index * 8;
                        unsigned long prel31_offset;

                        /* Note: this is meant to be equivalent to an
                           R_ARM_PREL31 relocation.  These synthetic
                           EXIDX_CANTUNWIND markers are not relocated by the
                           usual BFD method.  */
                        prel31_offset = (text_offset - exidx_offset)
                                        & 0x7ffffffful;

                        /* First address we can't unwind.  */
                        bfd_put_32 (output_bfd, prel31_offset,
                                    &edited_contents[out_index * 8]);

                        /* Code for EXIDX_CANTUNWIND.  */
                        bfd_put_32 (output_bfd, 0x1,
                                    &edited_contents[out_index * 8 + 4]);
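                        /* E.g. if the end of the text section lies 0x2000
                           bytes below this table entry, prel31_offset is
                           (-0x2000) & 0x7fffffff = 0x7fffe000; an unwinder
                           sign-extends bit 30 to recover the displacement.
                           The pair just written is the standard
                           { prel31(address), EXIDX_CANTUNWIND (0x1) } entry.  */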
                        out_index++;
                        add_to_offsets -= 8;
                      }
                      break;
                    }

                  edit_node = edit_node->next;
                }
            }
          else
            {
              /* No more edits, copy remaining entries verbatim.  */
              copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
                                contents + in_index * 8, add_to_offsets);
              out_index++;
              in_index++;
            }
        }

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
        bfd_set_section_contents (output_bfd, sec->output_section,
                                  edited_contents,
                                  (file_ptr) sec->output_offset, sec->size);

      return TRUE;
    }
  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
                         &data);
    }

  if (mapcount == 0)
    return FALSE;
  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
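      /* The sorted map partitions the section into runs introduced by the
         ARM ($a), Thumb ($t) and data ($d) mapping symbols.  For example, an
         ARM code word stored as the bytes 12 34 56 78 is rewritten below as
         78 56 34 12, Thumb code is swapped per 16-bit halfword, and data is
         left untouched.  */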
      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
        {
          if (i == mapcount - 1)
            end = sec->size;
          else
            end = map[i + 1].vma;

          switch (map[i].type)
            {
            case 'a':
              /* Byte swap code words.  */
              while (ptr + 3 < end)
                {
                  tmp = contents[ptr];
                  contents[ptr] = contents[ptr + 3];
                  contents[ptr + 3] = tmp;
                  tmp = contents[ptr + 1];
                  contents[ptr + 1] = contents[ptr + 2];
                  contents[ptr + 2] = tmp;
                  ptr += 4;
                }
              break;

            case 't':
              /* Byte swap code halfwords.  */
              while (ptr + 1 < end)
                {
                  tmp = contents[ptr];
                  contents[ptr] = contents[ptr + 1];
                  contents[ptr + 1] = tmp;
                  ptr += 2;
                }
              break;

            case 'd':
              /* Leave data alone.  */
              break;
            }
          ptr = end;
        }
    }
  free (map);
  arm_data->mapcount = 0;
  arm_data->mapsize = 0;
  arm_data->map = NULL;
  unrecord_section_with_arm_elf_section_data (sec);

  return FALSE;
}
static void
unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
                                        asection * sec,
                                        void * ignore ATTRIBUTE_UNUSED)
{
  unrecord_section_with_arm_elf_section_data (sec);
}

static bfd_boolean
elf32_arm_close_and_cleanup (bfd * abfd)
{
  if (abfd->sections)
    bfd_map_over_sections (abfd,
                           unrecord_section_via_map_over_sections,
                           NULL);

  return _bfd_elf_close_and_cleanup (abfd);
}

static bfd_boolean
elf32_arm_bfd_free_cached_info (bfd * abfd)
{
  if (abfd->sections)
    bfd_map_over_sections (abfd,
                           unrecord_section_via_map_over_sections,
                           NULL);

  return _bfd_free_cached_info (abfd);
}
/* Display STT_ARM_TFUNC symbols as functions.  */

static void
elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
                             asymbol *asym)
{
  elf_symbol_type *elfsym = (elf_symbol_type *) asym;

  if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
    elfsym->symbol.flags |= BSF_FUNCTION;
}
/* Mangle thumb function symbols as we read them in.  */

static bfd_boolean
elf32_arm_swap_symbol_in (bfd * abfd,
                          const void *psrc,
                          const void *pshn,
                          Elf_Internal_Sym *dst)
{
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return FALSE;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  Turn these into STT_ARM_TFUNC.  */
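  /* For example, a Thumb function read in with st_value 0x8001 becomes an
     STT_ARM_TFUNC symbol with st_value 0x8000.  */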
  if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
      && (dst->st_value & 1))
    {
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
      dst->st_value &= ~(bfd_vma) 1;
    }
  return TRUE;
}
/* Mangle thumb function symbols as we write them out.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
                           const Elf_Internal_Sym *src,
                           void *cdst,
                           void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
    {
      newsym = *src;
      newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
        {
          /* Do this only for defined symbols.  At link time, the static
             linker will simulate the work of the dynamic linker in resolving
             symbols and will carry over the thumbness of found symbols to
             the output symbol table.  It's not clear how it happens, but
             the thumbness of undefined symbols can well be different at
             runtime, and writing '1' for them will be confusing for users
             and possibly for the dynamic linker itself.  */
          newsym.st_value |= 1;
        }

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}
/* Add the PT_ARM_EXIDX program header.  */

static bfd_boolean
elf32_arm_modify_segment_map (bfd *abfd,
                              struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
         want to add another one.  This situation arises when running
         "strip"; the input binary already has the header.  */
      m = elf_tdata (abfd)->segment_map;
      while (m && m->p_type != PT_ARM_EXIDX)
        m = m->next;
      if (!m)
        {
          m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
          if (m == NULL)
            return FALSE;
          m->p_type = PT_ARM_EXIDX;
          m->count = 1;
          m->sections[0] = sec;
          m->next = elf_tdata (abfd)->segment_map;
          elf_tdata (abfd)->segment_map = m;
        }
    }

  return TRUE;
}
/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
                                      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}
/* We have two function types: STT_FUNC and STT_ARM_TFUNC.  */

static bfd_boolean
elf32_arm_is_function_type (unsigned int type)
{
  return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
}
/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,
  1,
  32, 2,
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
#define ELF_ARCH bfd_arch_arm
#define ELF_MACHINE_CODE EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE 0x1000
#else
#define ELF_MAXPAGESIZE 0x8000
#endif
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
#define bfd_elf32_mkobject elf32_arm_mkobject

#define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
#define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
#define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
#define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
#define bfd_elf32_bfd_final_link elf32_arm_final_link
#define elf_backend_get_symbol_type elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs elf32_arm_check_relocs
#define elf_backend_relocate_section elf32_arm_relocate_section
#define elf_backend_write_section elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
#define elf_backend_init_index_section _bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers elf32_arm_post_process_headers
#define elf_backend_reloc_type_class elf32_arm_reloc_type_class
#define elf_backend_object_p elf32_arm_object_p
#define elf_backend_section_flags elf32_arm_section_flags
#define elf_backend_fake_sections elf32_arm_fake_sections
#define elf_backend_section_from_shdr elf32_arm_section_from_shdr
#define elf_backend_final_write_processing elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
#define elf_backend_symbol_processing elf32_arm_symbol_processing
#define elf_backend_size_info elf32_arm_size_info
#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing elf32_arm_begin_write_processing
#define elf_backend_is_function_type elf32_arm_is_function_type

#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
#define elf_backend_plt_readonly 1
#define elf_backend_want_got_plt 1
#define elf_backend_want_plt_sym 0
#define elf_backend_may_use_rel_p 1
#define elf_backend_may_use_rela_p 0
#define elf_backend_default_use_rela_p 0

#define elf_backend_got_header_size 12
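/* The 12-byte GOT header covers the three reserved words at the start of
   .got: conventionally the address of the dynamic section, plus two entries
   that the dynamic linker fills in at run time.  */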
#undef elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor "aeabi"
#undef elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section ".ARM.attributes"
#undef elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
#undef elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order

#include "elf32-target.h"
/* VxWorks Targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-vxworks"
/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for VxWorks.  */

static struct bfd_link_hash_table *
elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
        = (struct elf32_arm_link_hash_table *) ret;
      htab->use_rel = 0;
      htab->vxworks_p = 1;
    }
  return ret;
}
static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}

#undef elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
#undef elf_backend_add_symbol_hook
#define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
#undef elf_backend_emit_relocs
#define elf_backend_emit_relocs elf_vxworks_emit_relocs

#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 0
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 1
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 1
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 1
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x1000
#include "elf32-target.h"


/* Symbian OS Targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-symbian"
/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for Symbian OS.  */

static struct bfd_link_hash_table *
elf32_arm_symbian_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
        = (struct elf32_arm_link_hash_table *)ret;
      /* There is no PLT header for Symbian OS.  */
      htab->plt_header_size = 0;
      /* The PLT entries are each one instruction and one word.  */
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
      htab->symbian_p = 1;
      /* Symbian uses armv5t or above, so use_blx is always true.  */
      htab->use_blx = 1;
      htab->root.is_relocatable_executable = 1;
    }
  return ret;
}
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
  { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
  { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
  { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     applied.  */
  { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
                                          struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
static bfd_boolean
elf32_arm_symbian_modify_segment_map (bfd *abfd,
                                      struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  asection *dynsec;

  /* BPABI shared libraries and executables should have a PT_DYNAMIC
     segment.  However, because the .dynamic section is not marked
     with SEC_LOAD, the generic ELF code will not create such a
     segment.  */
  dynsec = bfd_get_section_by_name (abfd, ".dynamic");
  if (dynsec)
    {
      for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
        if (m->p_type == PT_DYNAMIC)
          break;

      if (m == NULL)
        {
          m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
          m->next = elf_tdata (abfd)->segment_map;
          elf_tdata (abfd)->segment_map = m;
        }
    }

  /* Also call the generic arm routine.  */
  return elf32_arm_modify_segment_map (abfd, info);
}
/* Return the address of the Ith PLT stub in section PLT, for relocation REL,
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
                               const arelent *rel ATTRIBUTE_UNUSED)
{
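  /* Each Symbian PLT entry occupies 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry)
     bytes, matching the plt_entry_size set in
     elf32_arm_symbian_link_hash_table_create above, so stub I starts that
     many bytes times I past the start of .plt.  */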
  return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
}

#undef elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef elf_backend_add_symbol_hook
#undef elf_backend_emit_relocs

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
#undef elf_backend_special_sections
#define elf_backend_special_sections elf32_arm_symbian_special_sections
#undef elf_backend_begin_write_processing
#define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing

#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section.  */
#undef elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val

#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 1
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 0
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 0
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x8000

#include "elf32-target.h"