1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
26 #include "libiberty.h"
29 #include "elf-vxworks.h"
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_entry. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_entry. */
39 #define RELOC_SIZE(HTAB) \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_entry. */
46 #define SWAP_RELOC_IN(HTAB) \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_entry. */
53 #define SWAP_RELOC_OUT(HTAB) \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64 static struct elf_backend_data elf32_arm_vxworks_bed;
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
71 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
72 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 static reloc_howto_type elf32_arm_howto_table_1[] =
78 HOWTO (R_ARM_NONE, /* type */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
82 FALSE, /* pc_relative */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
90 FALSE), /* pcrel_offset */
92 HOWTO (R_ARM_PC24, /* type */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
96 TRUE, /* pc_relative */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
111 FALSE, /* pc_relative */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
121 /* standard 32bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
126 TRUE, /* pc_relative */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
136 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
141 TRUE, /* pc_relative */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
156 FALSE, /* pc_relative */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
171 FALSE, /* pc_relative */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
181 HOWTO (R_ARM_THM_ABS5, /* type */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
185 FALSE, /* pc_relative */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
196 HOWTO (R_ARM_ABS8, /* type */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
200 FALSE, /* pc_relative */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
210 HOWTO (R_ARM_SBREL32, /* type */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
214 FALSE, /* pc_relative */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
224 HOWTO (R_ARM_THM_CALL, /* type */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
228 TRUE, /* pc_relative */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
238 HOWTO (R_ARM_THM_PC8, /* type */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
242 TRUE, /* pc_relative */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
252 HOWTO (R_ARM_BREL_ADJ, /* type */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
256 FALSE, /* pc_relative */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
266 HOWTO (R_ARM_SWI24, /* type */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
270 FALSE, /* pc_relative */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
280 HOWTO (R_ARM_THM_SWI8, /* type */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
284 FALSE, /* pc_relative */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
299 TRUE, /* pc_relative */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
314 TRUE, /* pc_relative */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
324 /* Dynamic TLS relocations. */
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
330 FALSE, /* pc_relative */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
344 FALSE, /* pc_relative */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
358 FALSE, /* pc_relative */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
368 /* Relocs used in ARM Linux */
370 HOWTO (R_ARM_COPY, /* type */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
374 FALSE, /* pc_relative */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
384 HOWTO (R_ARM_GLOB_DAT, /* type */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
388 FALSE, /* pc_relative */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
402 FALSE, /* pc_relative */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
412 HOWTO (R_ARM_RELATIVE, /* type */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
416 FALSE, /* pc_relative */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
426 HOWTO (R_ARM_GOTOFF32, /* type */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
430 FALSE, /* pc_relative */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
440 HOWTO (R_ARM_GOTPC, /* type */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
444 TRUE, /* pc_relative */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
454 HOWTO (R_ARM_GOT32, /* type */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
458 FALSE, /* pc_relative */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
468 HOWTO (R_ARM_PLT32, /* type */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
472 TRUE, /* pc_relative */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
482 HOWTO (R_ARM_CALL, /* type */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
486 TRUE, /* pc_relative */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
496 HOWTO (R_ARM_JUMP24, /* type */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
500 TRUE, /* pc_relative */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
510 HOWTO (R_ARM_THM_JUMP24, /* type */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
514 TRUE, /* pc_relative */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
524 HOWTO (R_ARM_BASE_ABS, /* type */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
528 FALSE, /* pc_relative */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
542 TRUE, /* pc_relative */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
556 TRUE, /* pc_relative */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
570 TRUE, /* pc_relative */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
584 FALSE, /* pc_relative */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
598 FALSE, /* pc_relative */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
612 FALSE, /* pc_relative */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
622 HOWTO (R_ARM_TARGET1, /* type */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
626 FALSE, /* pc_relative */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
636 HOWTO (R_ARM_ROSEGREL32, /* type */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
640 FALSE, /* pc_relative */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
650 HOWTO (R_ARM_V4BX, /* type */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
654 FALSE, /* pc_relative */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
664 HOWTO (R_ARM_TARGET2, /* type */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
668 FALSE, /* pc_relative */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
678 HOWTO (R_ARM_PREL31, /* type */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
682 TRUE, /* pc_relative */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
696 FALSE, /* pc_relative */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
706 HOWTO (R_ARM_MOVT_ABS, /* type */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
710 FALSE, /* pc_relative */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
724 TRUE, /* pc_relative */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
734 HOWTO (R_ARM_MOVT_PREL, /* type */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
738 TRUE, /* pc_relative */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
752 FALSE, /* pc_relative */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
766 FALSE, /* pc_relative */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
780 TRUE, /* pc_relative */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
794 TRUE, /* pc_relative */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
804 HOWTO (R_ARM_THM_JUMP19, /* type */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
808 TRUE, /* pc_relative */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
818 HOWTO (R_ARM_THM_JUMP6, /* type */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
822 TRUE, /* pc_relative */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
839 TRUE, /* pc_relative */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
849 HOWTO (R_ARM_THM_PC12, /* type */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
853 TRUE, /* pc_relative */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
863 HOWTO (R_ARM_ABS32_NOI, /* type */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
867 FALSE, /* pc_relative */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
877 HOWTO (R_ARM_REL32_NOI, /* type */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
881 TRUE, /* pc_relative */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
891 /* Group relocations. */
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
897 TRUE, /* pc_relative */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
911 TRUE, /* pc_relative */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
925 TRUE, /* pc_relative */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
939 TRUE, /* pc_relative */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
953 TRUE, /* pc_relative */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
967 TRUE, /* pc_relative */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
981 TRUE, /* pc_relative */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
995 TRUE, /* pc_relative */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1009 TRUE, /* pc_relative */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1023 TRUE, /* pc_relative */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1037 TRUE, /* pc_relative */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1051 TRUE, /* pc_relative */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1065 TRUE, /* pc_relative */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1079 TRUE, /* pc_relative */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1093 TRUE, /* pc_relative */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1107 TRUE, /* pc_relative */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1121 TRUE, /* pc_relative */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1135 TRUE, /* pc_relative */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1149 TRUE, /* pc_relative */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1163 TRUE, /* pc_relative */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1177 TRUE, /* pc_relative */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1191 TRUE, /* pc_relative */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1205 TRUE, /* pc_relative */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1219 TRUE, /* pc_relative */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1233 TRUE, /* pc_relative */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1247 TRUE, /* pc_relative */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1261 TRUE, /* pc_relative */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1271 /* End of group relocations. */
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1277 FALSE, /* pc_relative */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1291 FALSE, /* pc_relative */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1305 FALSE, /* pc_relative */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1319 FALSE, /* pc_relative */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1333 FALSE, /* pc_relative */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1347 FALSE, /* pc_relative */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1357 EMPTY_HOWTO (90), /* Unallocated. */
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1366 FALSE, /* pc_relative */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1380 FALSE, /* pc_relative */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1394 TRUE, /* pc_relative */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1408 FALSE, /* pc_relative */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1422 FALSE, /* pc_relative */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1439 FALSE, /* pc_relative */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1447 FALSE), /* pcrel_offset */
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1454 FALSE, /* pc_relative */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1462 FALSE), /* pcrel_offset */
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1468 TRUE, /* pc_relative */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1482 TRUE, /* pc_relative */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1497 FALSE, /* pc_relative */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1511 FALSE, /* pc_relative */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1525 FALSE, /* pc_relative */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1539 FALSE, /* pc_relative */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1553 FALSE, /* pc_relative */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1567 FALSE, /* pc_relative */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1581 FALSE, /* pc_relative */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1595 FALSE, /* pc_relative */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1606 /* 112-127 private relocations
1607 128 R_ARM_ME_TOO, obsolete
1608 129-255 unallocated in AAELF.
1610 249-255 extended, currently unused, relocations: */
1612 static reloc_howto_type elf32_arm_howto_table_2[4] =
1614 HOWTO (R_ARM_RREL32, /* type */
1616 0, /* size (0 = byte, 1 = short, 2 = long) */
1618 FALSE, /* pc_relative */
1620 complain_overflow_dont,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_RREL32", /* name */
1623 FALSE, /* partial_inplace */
1626 FALSE), /* pcrel_offset */
1628 HOWTO (R_ARM_RABS32, /* type */
1630 0, /* size (0 = byte, 1 = short, 2 = long) */
1632 FALSE, /* pc_relative */
1634 complain_overflow_dont,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_RABS32", /* name */
1637 FALSE, /* partial_inplace */
1640 FALSE), /* pcrel_offset */
1642 HOWTO (R_ARM_RPC24, /* type */
1644 0, /* size (0 = byte, 1 = short, 2 = long) */
1646 FALSE, /* pc_relative */
1648 complain_overflow_dont,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_RPC24", /* name */
1651 FALSE, /* partial_inplace */
1654 FALSE), /* pcrel_offset */
1656 HOWTO (R_ARM_RBASE, /* type */
1658 0, /* size (0 = byte, 1 = short, 2 = long) */
1660 FALSE, /* pc_relative */
1662 complain_overflow_dont,/* complain_on_overflow */
1663 bfd_elf_generic_reloc, /* special_function */
1664 "R_ARM_RBASE", /* name */
1665 FALSE, /* partial_inplace */
1668 FALSE) /* pcrel_offset */
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1688 unsigned int r_type;
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1694 struct elf32_arm_reloc_map
1696 bfd_reloc_code_real_type bfd_reloc_val;
1697 unsigned char elf_reloc_val;
1700 /* All entries in this list must also be present in elf32_arm_howto_table. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1733 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1815 /* Support for core dump NOTE sections. */
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1823 switch (note->descsz)
1828 case 148: /* Linux/ARM 32-bit. */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1850 switch (note->descsz)
1855 case 124: /* Linux/ARM elf_prpsinfo. */
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1862 /* Note that for some reason, a spurious space is tacked
1863 onto the end of the args in some (at least one anyway)
1864 implementations, so strip it off if it exists. */
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1876 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1877 #define TARGET_LITTLE_NAME "elf32-littlearm"
1878 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1879 #define TARGET_BIG_NAME "elf32-bigarm"
1881 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1882 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1884 typedef unsigned long int insn32;
1885 typedef unsigned short int insn16;
1887 /* In lieu of proper flags, assume all EABIv4 or later objects are
1889 #define INTERWORK_FLAG(abfd) \
1890 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1891 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1892 || ((abfd)->flags & BFD_LINKER_CREATED))
1894 /* The linker script knows the section names for placement.
1895 The entry_names are used to do simple name mangling on the stubs.
1896 Given a function name, and its type, the stub can be found. The
1897 name can be changed. The only requirement is the %s be present. */
1898 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1899 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1901 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1902 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1904 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1905 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1907 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1908 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1910 #define STUB_ENTRY_NAME "__%s_veneer"
1912 /* The name of the dynamic interpreter. This is put in the .interp
1914 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1916 #ifdef FOUR_WORD_PLT
1918 /* The first entry in a procedure linkage table looks like
1919 this. It is set up so that any shared library function that is
1920 called before the relocation has been set up calls the dynamic
1922 static const bfd_vma elf32_arm_plt0_entry [] =
1924 0xe52de004, /* str lr, [sp, #-4]! */
1925 0xe59fe010, /* ldr lr, [pc, #16] */
1926 0xe08fe00e, /* add lr, pc, lr */
1927 0xe5bef008, /* ldr pc, [lr, #8]! */
1930 /* Subsequent entries in a procedure linkage table look like
1932 static const bfd_vma elf32_arm_plt_entry [] =
1934 0xe28fc600, /* add ip, pc, #NN */
1935 0xe28cca00, /* add ip, ip, #NN */
1936 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1937 0x00000000, /* unused */
1942 /* The first entry in a procedure linkage table looks like
1943 this. It is set up so that any shared library function that is
1944 called before the relocation has been set up calls the dynamic
1946 static const bfd_vma elf32_arm_plt0_entry [] =
1948 0xe52de004, /* str lr, [sp, #-4]! */
1949 0xe59fe004, /* ldr lr, [pc, #4] */
1950 0xe08fe00e, /* add lr, pc, lr */
1951 0xe5bef008, /* ldr pc, [lr, #8]! */
1952 0x00000000, /* &GOT[0] - . */
1955 /* Subsequent entries in a procedure linkage table look like
1957 static const bfd_vma elf32_arm_plt_entry [] =
1959 0xe28fc600, /* add ip, pc, #0xNN00000 */
1960 0xe28cca00, /* add ip, ip, #0xNN000 */
1961 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1966 /* The format of the first entry in the procedure linkage table
1967 for a VxWorks executable. */
1968 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1970 0xe52dc008, /* str ip,[sp,#-8]! */
1971 0xe59fc000, /* ldr ip,[pc] */
1972 0xe59cf008, /* ldr pc,[ip,#8] */
1973 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1976 /* The format of subsequent entries in a VxWorks executable. */
1977 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1979 0xe59fc000, /* ldr ip,[pc] */
1980 0xe59cf000, /* ldr pc,[ip] */
1981 0x00000000, /* .long @got */
1982 0xe59fc000, /* ldr ip,[pc] */
1983 0xea000000, /* b _PLT */
1984 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1987 /* The format of entries in a VxWorks shared library. */
1988 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1990 0xe59fc000, /* ldr ip,[pc] */
1991 0xe79cf009, /* ldr pc,[ip,r9] */
1992 0x00000000, /* .long @got */
1993 0xe59fc000, /* ldr ip,[pc] */
1994 0xe599f008, /* ldr pc,[r9,#8] */
1995 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1998 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1999 #define PLT_THUMB_STUB_SIZE 4
2000 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2006 /* The entries in a PLT when using a DLL-based target with multiple
2008 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2010 0xe51ff004, /* ldr pc, [pc, #-4] */
2011 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2014 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2015 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2016 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2017 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2018 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2019 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2029 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2030 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2031 is inserted in arm_build_one_stub(). */
2032 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2033 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2034 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2035 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2036 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2037 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2042 enum stub_insn_type type;
2043 unsigned int r_type;
2047 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2048 to reach the stub if necessary. */
2049 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2051 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2052 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2055 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2057 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2059 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2060 ARM_INSN(0xe12fff1c), /* bx ip */
2061 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2064 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2065 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2067 THUMB16_INSN(0xb401), /* push {r0} */
2068 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2069 THUMB16_INSN(0x4684), /* mov ip, r0 */
2070 THUMB16_INSN(0xbc01), /* pop {r0} */
2071 THUMB16_INSN(0x4760), /* bx ip */
2072 THUMB16_INSN(0xbf00), /* nop */
2073 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2076 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2078 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2080 THUMB16_INSN(0x4778), /* bx pc */
2081 THUMB16_INSN(0x46c0), /* nop */
2082 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2083 ARM_INSN(0xe12fff1c), /* bx ip */
2084 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2087 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2089 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2091 THUMB16_INSN(0x4778), /* bx pc */
2092 THUMB16_INSN(0x46c0), /* nop */
2093 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2094 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2097 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2098 one, when the destination is close enough. */
2099 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2101 THUMB16_INSN(0x4778), /* bx pc */
2102 THUMB16_INSN(0x46c0), /* nop */
2103 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2106 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2107 blx to reach the stub if necessary. */
2108 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2110 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2111 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2112 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2115 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2116 blx to reach the stub if necessary. We can not add into pc;
2117 it is not guaranteed to mode switch (different in ARMv6 and
2119 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2121 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2122 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2123 ARM_INSN(0xe12fff1c), /* bx ip */
2124 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2127 /* V4T ARM -> ARM long branch stub, PIC. */
2128 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2130 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2131 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2132 ARM_INSN(0xe12fff1c), /* bx ip */
2133 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2136 /* V4T Thumb -> ARM long branch stub, PIC. */
2137 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2139 THUMB16_INSN(0x4778), /* bx pc */
2140 THUMB16_INSN(0x46c0), /* nop */
2141 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2142 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2143 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2146 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2148 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2150 THUMB16_INSN(0xb401), /* push {r0} */
2151 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2152 THUMB16_INSN(0x46fc), /* mov ip, pc */
2153 THUMB16_INSN(0x4484), /* add ip, r0 */
2154 THUMB16_INSN(0xbc01), /* pop {r0} */
2155 THUMB16_INSN(0x4760), /* bx ip */
2156 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2159 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2161 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2163 THUMB16_INSN(0x4778), /* bx pc */
2164 THUMB16_INSN(0x46c0), /* nop */
2165 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2166 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2167 ARM_INSN(0xe12fff1c), /* bx ip */
2168 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2171 /* Cortex-A8 erratum-workaround stubs. */
2173 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2174 can't use a conditional branch to reach this stub). */
2176 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2178 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2179 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2180 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2183 /* Stub used for b.w and bl.w instructions. */
2185 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2187 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2190 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2192 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2195 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2196 instruction (which switches to ARM mode) to point to this stub. Jump to the
2197 real destination using an ARM-mode branch. */
2199 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2201 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2204 /* Section name for stubs is the associated section name plus this
2206 #define STUB_SUFFIX ".stub"
2208 /* One entry per long/short branch stub defined above. */
2210 DEF_STUB(long_branch_any_any) \
2211 DEF_STUB(long_branch_v4t_arm_thumb) \
2212 DEF_STUB(long_branch_thumb_only) \
2213 DEF_STUB(long_branch_v4t_thumb_thumb) \
2214 DEF_STUB(long_branch_v4t_thumb_arm) \
2215 DEF_STUB(short_branch_v4t_thumb_arm) \
2216 DEF_STUB(long_branch_any_arm_pic) \
2217 DEF_STUB(long_branch_any_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2220 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2221 DEF_STUB(long_branch_thumb_only_pic) \
2222 DEF_STUB(a8_veneer_b_cond) \
2223 DEF_STUB(a8_veneer_b) \
2224 DEF_STUB(a8_veneer_bl) \
2225 DEF_STUB(a8_veneer_blx)
2227 #define DEF_STUB(x) arm_stub_##x,
2228 enum elf32_arm_stub_type {
2236 const insn_sequence* template;
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2246 struct elf32_arm_stub_hash_entry
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2251 /* The stub section. */
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2294 /* Used to build a map of a section. This is required for mixed-endian
2297 typedef struct elf32_elf_section_map
2302 elf32_arm_section_map;
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2313 elf32_vfp11_erratum_type;
2315 typedef struct elf32_vfp11_erratum_list
2317 struct elf32_vfp11_erratum_list *next;
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2328 struct elf32_vfp11_erratum_list *branch;
2332 elf32_vfp11_erratum_type type;
2334 elf32_vfp11_erratum_list;
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2341 arm_unwind_edit_type;
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2352 struct arm_unwind_table_edit *next;
2354 arm_unwind_table_edit;
2356 typedef struct _arm_elf_section_data
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2369 /* Unwind info attached to a text section. */
2372 asection *arm_exidx_sec;
2375 /* Unwind info attached to an .ARM.exidx section. */
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2383 _arm_elf_section_data;
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2388 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so may be created multiple times: we use an array of these entries whilst
2391 relaxing which we can refresh easily, then create stubs for each potentially
2392 erratum-triggering instruction once we've settled on a solution. */
2394 struct a8_erratum_fix {
2399 unsigned long orig_insn;
2401 enum elf32_arm_stub_type stub_type;
2404 /* A table of relocs applied to branches which might trigger Cortex-A8
2407 struct a8_erratum_reloc {
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2416 /* The size of the thread control block. */
2419 struct elf_arm_obj_tdata
2421 struct elf_obj_tdata root;
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_TDATA)
2445 elf32_arm_mkobject (bfd *abfd)
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2476 struct elf_link_hash_entry root;
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2504 /* A pointer to the most recently used stub hash entry against this
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2520 #define arm_stub_hash_lookup(table, string, create, copy) \
2521 ((struct elf32_arm_stub_hash_entry *) \
2522 bfd_hash_lookup ((table), (string), (create), (copy)))
2524 /* ARM ELF linker hash table. */
2525 struct elf32_arm_link_hash_table
2527 /* The main hash table. */
2528 struct elf_link_hash_table root;
2530 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2531 bfd_size_type thumb_glue_size;
2533 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2534 bfd_size_type arm_glue_size;
2536 /* The size in bytes of section containing the ARMv4 BX veneers. */
2537 bfd_size_type bx_glue_size;
2539 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2540 veneer has been populated. */
2541 bfd_vma bx_glue_offset[15];
2543 /* The size in bytes of the section containing glue for VFP11 erratum
2545 bfd_size_type vfp11_erratum_glue_size;
2547 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2548 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2549 elf32_arm_write_section(). */
2550 struct a8_erratum_fix *a8_erratum_fixes;
2551 unsigned int num_a8_erratum_fixes;
2553 /* An arbitrary input BFD chosen to hold the glue sections. */
2554 bfd * bfd_of_glue_owner;
2556 /* Nonzero to output a BE8 image. */
2559 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2560 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2563 /* The relocation to use for R_ARM_TARGET2 relocations. */
2566 /* 0 = Ignore R_ARM_V4BX.
2567 1 = Convert BX to MOV PC.
2568 2 = Generate v4 interworing stubs. */
2571 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2574 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2577 /* What sort of code sequences we should look for which may trigger the
2578 VFP11 denorm erratum. */
2579 bfd_arm_vfp11_fix vfp11_fix;
2581 /* Global counter for the number of fixes we have emitted. */
2582 int num_vfp11_fixes;
2584 /* Nonzero to force PIC branch veneers. */
2587 /* The number of bytes in the initial entry in the PLT. */
2588 bfd_size_type plt_header_size;
2590 /* The number of bytes in the subsequent PLT etries. */
2591 bfd_size_type plt_entry_size;
2593 /* True if the target system is VxWorks. */
2596 /* True if the target system is Symbian OS. */
2599 /* True if the target uses REL relocations. */
2602 /* Short-cuts to get to dynamic linker sections. */
2611 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2614 /* Data for R_ARM_TLS_LDM32 relocations. */
2617 bfd_signed_vma refcount;
2621 /* Small local sym cache. */
2622 struct sym_cache sym_cache;
2624 /* For convenience in allocate_dynrelocs. */
2627 /* The stub hash table. */
2628 struct bfd_hash_table stub_hash_table;
2630 /* Linker stub bfd. */
2633 /* Linker call-backs. */
2634 asection * (*add_stub_section) (const char *, asection *);
2635 void (*layout_sections_again) (void);
2637 /* Array to keep track of which stub sections have been created, and
2638 information on stub grouping. */
2641 /* This is the section to which stubs in the group will be
2644 /* The stub section. */
2648 /* Assorted information used by elf32_arm_size_stubs. */
2649 unsigned int bfd_count;
2651 asection **input_list;
2654 /* Create an entry in an ARM ELF linker hash table. */
2656 static struct bfd_hash_entry *
2657 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2658 struct bfd_hash_table * table,
2659 const char * string)
2661 struct elf32_arm_link_hash_entry * ret =
2662 (struct elf32_arm_link_hash_entry *) entry;
2664 /* Allocate the structure if it has not already been allocated by a
2667 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2669 return (struct bfd_hash_entry *) ret;
2671 /* Call the allocation method of the superclass. */
2672 ret = ((struct elf32_arm_link_hash_entry *)
2673 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2677 ret->relocs_copied = NULL;
2678 ret->tls_type = GOT_UNKNOWN;
2679 ret->plt_thumb_refcount = 0;
2680 ret->plt_maybe_thumb_refcount = 0;
2681 ret->plt_got_offset = -1;
2682 ret->export_glue = NULL;
2684 ret->stub_cache = NULL;
2687 return (struct bfd_hash_entry *) ret;
2690 /* Initialize an entry in the stub hash table. */
2692 static struct bfd_hash_entry *
2693 stub_hash_newfunc (struct bfd_hash_entry *entry,
2694 struct bfd_hash_table *table,
2697 /* Allocate the structure if it has not already been allocated by a
2701 entry = bfd_hash_allocate (table,
2702 sizeof (struct elf32_arm_stub_hash_entry));
2707 /* Call the allocation method of the superclass. */
2708 entry = bfd_hash_newfunc (entry, table, string);
2711 struct elf32_arm_stub_hash_entry *eh;
2713 /* Initialize the local fields. */
2714 eh = (struct elf32_arm_stub_hash_entry *) entry;
2715 eh->stub_sec = NULL;
2716 eh->stub_offset = 0;
2717 eh->target_value = 0;
2718 eh->target_section = NULL;
2719 eh->stub_type = arm_stub_none;
2721 eh->stub_template = NULL;
2722 eh->stub_template_size = 0;
2730 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2731 shortcuts to them in our hash table. */
2734 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2736 struct elf32_arm_link_hash_table *htab;
2738 htab = elf32_arm_hash_table (info);
2739 /* BPABI objects never have a GOT, or associated sections. */
2740 if (htab->symbian_p)
2743 if (! _bfd_elf_create_got_section (dynobj, info))
2746 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2747 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2748 if (!htab->sgot || !htab->sgotplt)
2751 htab->srelgot = bfd_get_section_by_name (dynobj,
2752 RELOC_SECTION (htab, ".got"));
2753 if (htab->srelgot == NULL)
2758 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2759 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2763 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2765 struct elf32_arm_link_hash_table *htab;
2767 htab = elf32_arm_hash_table (info);
2768 if (!htab->sgot && !create_got_section (dynobj, info))
2771 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2774 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2775 htab->srelplt = bfd_get_section_by_name (dynobj,
2776 RELOC_SECTION (htab, ".plt"));
2777 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2779 htab->srelbss = bfd_get_section_by_name (dynobj,
2780 RELOC_SECTION (htab, ".bss"));
2782 if (htab->vxworks_p)
2784 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2789 htab->plt_header_size = 0;
2790 htab->plt_entry_size
2791 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2795 htab->plt_header_size
2796 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2797 htab->plt_entry_size
2798 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2805 || (!info->shared && !htab->srelbss))
2811 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2814 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2815 struct elf_link_hash_entry *dir,
2816 struct elf_link_hash_entry *ind)
2818 struct elf32_arm_link_hash_entry *edir, *eind;
2820 edir = (struct elf32_arm_link_hash_entry *) dir;
2821 eind = (struct elf32_arm_link_hash_entry *) ind;
2823 if (eind->relocs_copied != NULL)
2825 if (edir->relocs_copied != NULL)
2827 struct elf32_arm_relocs_copied **pp;
2828 struct elf32_arm_relocs_copied *p;
2830 /* Add reloc counts against the indirect sym to the direct sym
2831 list. Merge any entries against the same section. */
2832 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2834 struct elf32_arm_relocs_copied *q;
2836 for (q = edir->relocs_copied; q != NULL; q = q->next)
2837 if (q->section == p->section)
2839 q->pc_count += p->pc_count;
2840 q->count += p->count;
2847 *pp = edir->relocs_copied;
2850 edir->relocs_copied = eind->relocs_copied;
2851 eind->relocs_copied = NULL;
2854 if (ind->root.type == bfd_link_hash_indirect)
2856 /* Copy over PLT info. */
2857 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2858 eind->plt_thumb_refcount = 0;
2859 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2860 eind->plt_maybe_thumb_refcount = 0;
2862 if (dir->got.refcount <= 0)
2864 edir->tls_type = eind->tls_type;
2865 eind->tls_type = GOT_UNKNOWN;
2869 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2872 /* Create an ARM elf linker hash table. */
2874 static struct bfd_link_hash_table *
2875 elf32_arm_link_hash_table_create (bfd *abfd)
2877 struct elf32_arm_link_hash_table *ret;
2878 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2880 ret = bfd_malloc (amt);
2884 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2885 elf32_arm_link_hash_newfunc,
2886 sizeof (struct elf32_arm_link_hash_entry)))
2893 ret->sgotplt = NULL;
2894 ret->srelgot = NULL;
2896 ret->srelplt = NULL;
2897 ret->sdynbss = NULL;
2898 ret->srelbss = NULL;
2899 ret->srelplt2 = NULL;
2900 ret->thumb_glue_size = 0;
2901 ret->arm_glue_size = 0;
2902 ret->bx_glue_size = 0;
2903 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2904 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2905 ret->vfp11_erratum_glue_size = 0;
2906 ret->num_vfp11_fixes = 0;
2907 ret->fix_cortex_a8 = 0;
2908 ret->bfd_of_glue_owner = NULL;
2909 ret->byteswap_code = 0;
2910 ret->target1_is_rel = 0;
2911 ret->target2_reloc = R_ARM_NONE;
2912 #ifdef FOUR_WORD_PLT
2913 ret->plt_header_size = 16;
2914 ret->plt_entry_size = 16;
2916 ret->plt_header_size = 20;
2917 ret->plt_entry_size = 12;
2924 ret->sym_cache.abfd = NULL;
2926 ret->tls_ldm_got.refcount = 0;
2927 ret->stub_bfd = NULL;
2928 ret->add_stub_section = NULL;
2929 ret->layout_sections_again = NULL;
2930 ret->stub_group = NULL;
2933 ret->input_list = NULL;
2935 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2936 sizeof (struct elf32_arm_stub_hash_entry)))
2942 return &ret->root.root;
2945 /* Free the derived linker hash table. */
2948 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2950 struct elf32_arm_link_hash_table *ret
2951 = (struct elf32_arm_link_hash_table *) hash;
2953 bfd_hash_table_free (&ret->stub_hash_table);
2954 _bfd_generic_link_hash_table_free (hash);
2957 /* Determine if we're dealing with a Thumb only architecture. */
2960 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2962 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2966 if (arch != TAG_CPU_ARCH_V7)
2969 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2970 Tag_CPU_arch_profile);
2972 return profile == 'M';
2975 /* Determine if we're dealing with a Thumb-2 object. */
2978 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2980 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2982 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2986 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2990 case arm_stub_long_branch_thumb_only:
2991 case arm_stub_long_branch_v4t_thumb_arm:
2992 case arm_stub_short_branch_v4t_thumb_arm:
2993 case arm_stub_long_branch_v4t_thumb_arm_pic:
2994 case arm_stub_long_branch_thumb_only_pic:
3005 /* Determine the type of stub needed, if any, for a call. */
3007 static enum elf32_arm_stub_type
3008 arm_type_of_stub (struct bfd_link_info *info,
3009 asection *input_sec,
3010 const Elf_Internal_Rela *rel,
3011 unsigned char st_type,
3012 struct elf32_arm_link_hash_entry *hash,
3013 bfd_vma destination,
3019 bfd_signed_vma branch_offset;
3020 unsigned int r_type;
3021 struct elf32_arm_link_hash_table * globals;
3024 enum elf32_arm_stub_type stub_type = arm_stub_none;
3027 /* We don't know the actual type of destination in case it is of
3028 type STT_SECTION: give up. */
3029 if (st_type == STT_SECTION)
3032 globals = elf32_arm_hash_table (info);
3034 thumb_only = using_thumb_only (globals);
3036 thumb2 = using_thumb2 (globals);
3038 /* Determine where the call point is. */
3039 location = (input_sec->output_offset
3040 + input_sec->output_section->vma
3043 branch_offset = (bfd_signed_vma)(destination - location);
3045 r_type = ELF32_R_TYPE (rel->r_info);
3047 /* Keep a simpler condition, for the sake of clarity. */
3048 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3051 /* Note when dealing with PLT entries: the main PLT stub is in
3052 ARM mode, so if the branch is in Thumb mode, another
3053 Thumb->ARM stub will be inserted later just before the ARM
3054 PLT stub. We don't take this extra distance into account
3055 here, because if a long branch stub is needed, we'll add a
3056 Thumb->Arm one and branch directly to the ARM PLT entry
3057 because it avoids spreading offset corrections in several
3061 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3063 /* Handle cases where:
3064 - this call goes too far (different Thumb/Thumb2 max
3066 - it's a Thumb->Arm call and blx is not available, or it's a
3067 Thumb->Arm branch (not bl). A stub is needed in this case,
3068 but only if this call is not through a PLT entry. Indeed,
3069 PLT stubs handle mode switching already.
3072 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3073 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3075 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3076 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3077 || ((st_type != STT_ARM_TFUNC)
3078 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3079 || (r_type == R_ARM_THM_JUMP24))
3082 if (st_type == STT_ARM_TFUNC)
3084 /* Thumb to thumb. */
3087 stub_type = (info->shared | globals->pic_veneer)
3089 ? ((globals->use_blx
3090 && (r_type ==R_ARM_THM_CALL))
3091 /* V5T and above. Stub starts with ARM code, so
3092 we must be able to switch mode before
3093 reaching it, which is only possible for 'bl'
3094 (ie R_ARM_THM_CALL relocation). */
3095 ? arm_stub_long_branch_any_thumb_pic
3096 /* On V4T, use Thumb code only. */
3097 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3099 /* non-PIC stubs. */
3100 : ((globals->use_blx
3101 && (r_type ==R_ARM_THM_CALL))
3102 /* V5T and above. */
3103 ? arm_stub_long_branch_any_any
3105 : arm_stub_long_branch_v4t_thumb_thumb);
3109 stub_type = (info->shared | globals->pic_veneer)
3111 ? arm_stub_long_branch_thumb_only_pic
3113 : arm_stub_long_branch_thumb_only;
3120 && sym_sec->owner != NULL
3121 && !INTERWORK_FLAG (sym_sec->owner))
3123 (*_bfd_error_handler)
3124 (_("%B(%s): warning: interworking not enabled.\n"
3125 " first occurrence: %B: Thumb call to ARM"),
3126 sym_sec->owner, input_bfd, name);
3129 stub_type = (info->shared | globals->pic_veneer)
3131 ? ((globals->use_blx
3132 && (r_type ==R_ARM_THM_CALL))
3133 /* V5T and above. */
3134 ? arm_stub_long_branch_any_arm_pic
3136 : arm_stub_long_branch_v4t_thumb_arm_pic)
3138 /* non-PIC stubs. */
3139 : ((globals->use_blx
3140 && (r_type ==R_ARM_THM_CALL))
3141 /* V5T and above. */
3142 ? arm_stub_long_branch_any_any
3144 : arm_stub_long_branch_v4t_thumb_arm);
3146 /* Handle v4t short branches. */
3147 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3148 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3149 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3150 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3154 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3156 if (st_type == STT_ARM_TFUNC)
3161 && sym_sec->owner != NULL
3162 && !INTERWORK_FLAG (sym_sec->owner))
3164 (*_bfd_error_handler)
3165 (_("%B(%s): warning: interworking not enabled.\n"
3166 " first occurrence: %B: ARM call to Thumb"),
3167 sym_sec->owner, input_bfd, name);
3170 /* We have an extra 2-bytes reach because of
3171 the mode change (bit 24 (H) of BLX encoding). */
3172 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3173 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3174 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3175 || (r_type == R_ARM_JUMP24)
3176 || (r_type == R_ARM_PLT32))
3178 stub_type = (info->shared | globals->pic_veneer)
3180 ? ((globals->use_blx)
3181 /* V5T and above. */
3182 ? arm_stub_long_branch_any_thumb_pic
3184 : arm_stub_long_branch_v4t_arm_thumb_pic)
3186 /* non-PIC stubs. */
3187 : ((globals->use_blx)
3188 /* V5T and above. */
3189 ? arm_stub_long_branch_any_any
3191 : arm_stub_long_branch_v4t_arm_thumb);
3197 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3198 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3200 stub_type = (info->shared | globals->pic_veneer)
3202 ? arm_stub_long_branch_any_arm_pic
3203 /* non-PIC stubs. */
3204 : arm_stub_long_branch_any_any;
3212 /* Build a name for an entry in the stub hash table. */
3215 elf32_arm_stub_name (const asection *input_section,
3216 const asection *sym_sec,
3217 const struct elf32_arm_link_hash_entry *hash,
3218 const Elf_Internal_Rela *rel)
3225 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3226 stub_name = bfd_malloc (len);
3227 if (stub_name != NULL)
3228 sprintf (stub_name, "%08x_%s+%x",
3229 input_section->id & 0xffffffff,
3230 hash->root.root.root.string,
3231 (int) rel->r_addend & 0xffffffff);
3235 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3236 stub_name = bfd_malloc (len);
3237 if (stub_name != NULL)
3238 sprintf (stub_name, "%08x_%x:%x+%x",
3239 input_section->id & 0xffffffff,
3240 sym_sec->id & 0xffffffff,
3241 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3242 (int) rel->r_addend & 0xffffffff);
3248 /* Look up an entry in the stub hash. Stub entries are cached because
3249 creating the stub name takes a bit of time. */
3251 static struct elf32_arm_stub_hash_entry *
3252 elf32_arm_get_stub_entry (const asection *input_section,
3253 const asection *sym_sec,
3254 struct elf_link_hash_entry *hash,
3255 const Elf_Internal_Rela *rel,
3256 struct elf32_arm_link_hash_table *htab)
3258 struct elf32_arm_stub_hash_entry *stub_entry;
3259 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3260 const asection *id_sec;
3262 if ((input_section->flags & SEC_CODE) == 0)
3265 /* If this input section is part of a group of sections sharing one
3266 stub section, then use the id of the first section in the group.
3267 Stub names need to include a section id, as there may well be
3268 more than one stub used to reach say, printf, and we need to
3269 distinguish between them. */
3270 id_sec = htab->stub_group[input_section->id].link_sec;
3272 if (h != NULL && h->stub_cache != NULL
3273 && h->stub_cache->h == h
3274 && h->stub_cache->id_sec == id_sec)
3276 stub_entry = h->stub_cache;
3282 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3283 if (stub_name == NULL)
3286 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3287 stub_name, FALSE, FALSE);
3289 h->stub_cache = stub_entry;
3297 /* Find or create a stub section. Returns a pointer to the stub section, and
3298 the section to which the stub section will be attached (in *LINK_SEC_P).
3299 LINK_SEC_P may be NULL. */
3302 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3303 struct elf32_arm_link_hash_table *htab)
3308 link_sec = htab->stub_group[section->id].link_sec;
3309 stub_sec = htab->stub_group[section->id].stub_sec;
3310 if (stub_sec == NULL)
3312 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3313 if (stub_sec == NULL)
3319 namelen = strlen (link_sec->name);
3320 len = namelen + sizeof (STUB_SUFFIX);
3321 s_name = bfd_alloc (htab->stub_bfd, len);
3325 memcpy (s_name, link_sec->name, namelen);
3326 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3327 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3328 if (stub_sec == NULL)
3330 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3332 htab->stub_group[section->id].stub_sec = stub_sec;
3336 *link_sec_p = link_sec;
3341 /* Add a new stub entry to the stub hash. Not all fields of the new
3342 stub entry are initialised. */
3344 static struct elf32_arm_stub_hash_entry *
3345 elf32_arm_add_stub (const char *stub_name,
3347 struct elf32_arm_link_hash_table *htab)
3351 struct elf32_arm_stub_hash_entry *stub_entry;
3353 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3354 if (stub_sec == NULL)
3357 /* Enter this entry into the linker stub hash table. */
3358 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3360 if (stub_entry == NULL)
3362 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3368 stub_entry->stub_sec = stub_sec;
3369 stub_entry->stub_offset = 0;
3370 stub_entry->id_sec = link_sec;
3375 /* Store an Arm insn into an output section not processed by
3376 elf32_arm_write_section. */
3379 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3380 bfd * output_bfd, bfd_vma val, void * ptr)
3382 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3383 bfd_putl32 (val, ptr);
3385 bfd_putb32 (val, ptr);
3388 /* Store a 16-bit Thumb insn into an output section not processed by
3389 elf32_arm_write_section. */
3392 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3393 bfd * output_bfd, bfd_vma val, void * ptr)
3395 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3396 bfd_putl16 (val, ptr);
3398 bfd_putb16 (val, ptr);
3401 static bfd_reloc_status_type elf32_arm_final_link_relocate
3402 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3403 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3404 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3407 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3411 struct elf32_arm_stub_hash_entry *stub_entry;
3412 struct bfd_link_info *info;
3413 struct elf32_arm_link_hash_table *htab;
3421 const insn_sequence *template;
3423 struct elf32_arm_link_hash_table * globals;
3424 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3425 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3428 /* Massage our args to the form they really have. */
3429 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3430 info = (struct bfd_link_info *) in_arg;
3432 globals = elf32_arm_hash_table (info);
3434 htab = elf32_arm_hash_table (info);
3435 stub_sec = stub_entry->stub_sec;
3437 /* Make a note of the offset within the stubs for this entry. */
3438 stub_entry->stub_offset = stub_sec->size;
3439 loc = stub_sec->contents + stub_entry->stub_offset;
3441 stub_bfd = stub_sec->owner;
3443 /* This is the address of the start of the stub. */
3444 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3445 + stub_entry->stub_offset;
3447 /* This is the address of the stub destination. */
3448 sym_value = (stub_entry->target_value
3449 + stub_entry->target_section->output_offset
3450 + stub_entry->target_section->output_section->vma);
3452 template = stub_entry->stub_template;
3453 template_size = stub_entry->stub_template_size;
3456 for (i = 0; i < template_size; i++)
3458 switch (template[i].type)
3462 bfd_vma data = template[i].data;
3463 if (template[i].reloc_addend != 0)
3465 /* We've borrowed the reloc_addend field to mean we should
3466 insert a condition code into this (Thumb-1 branch)
3467 instruction. See THUMB16_BCOND_INSN. */
3468 BFD_ASSERT ((data & 0xff00) == 0xd000);
3469 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3471 put_thumb_insn (globals, stub_bfd, data, loc + size);
3477 put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
3479 put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
3481 if (template[i].r_type != R_ARM_NONE)
3483 stub_reloc_idx[nrelocs] = i;
3484 stub_reloc_offset[nrelocs++] = size;
3490 put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
3491 /* Handle cases where the target is encoded within the
3493 if (template[i].r_type == R_ARM_JUMP24)
3495 stub_reloc_idx[nrelocs] = i;
3496 stub_reloc_offset[nrelocs++] = size;
3502 bfd_put_32 (stub_bfd, template[i].data, loc + size);
3503 stub_reloc_idx[nrelocs] = i;
3504 stub_reloc_offset[nrelocs++] = size;
3514 stub_sec->size += size;
3516 /* Stub size has already been computed in arm_size_one_stub. Check
3518 BFD_ASSERT (size == stub_entry->stub_size);
3520 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3521 if (stub_entry->st_type == STT_ARM_TFUNC)
3524 /* Assume there is at least one and at most MAXRELOCS entries to relocate
3526 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3528 for (i = 0; i < nrelocs; i++)
3529 if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3530 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3531 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3532 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3534 Elf_Internal_Rela rel;
3535 bfd_boolean unresolved_reloc;
3536 char *error_message;
3538 = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3539 ? STT_ARM_TFUNC : 0;
3540 bfd_vma points_to = sym_value + stub_entry->target_addend;
3542 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3543 rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
3544 rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;
3546 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3547 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3548 template should refer back to the instruction after the original
3550 points_to = sym_value;
3552 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3553 properly. We should probably use this function unconditionally,
3554 rather than only for certain relocations listed in the enclosing
3555 conditional, for the sake of consistency. */
3556 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3557 (template[stub_reloc_idx[i]].r_type),
3558 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3559 points_to, info, stub_entry->target_section, "", sym_flags,
3560 (struct elf_link_hash_entry *) stub_entry, &unresolved_reloc,
3565 _bfd_final_link_relocate (elf32_arm_howto_from_type
3566 (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3567 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3568 sym_value + stub_entry->target_addend,
3569 template[stub_reloc_idx[i]].reloc_addend);
3576 /* Calculate the template, template size and instruction size for a stub.
3577 Return value is the instruction size. */
3580 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3581 const insn_sequence **stub_template,
3582 int *stub_template_size)
3584 const insn_sequence *template = NULL;
3585 int template_size = 0, i;
3588 template = stub_definitions[stub_type].template;
3589 template_size = stub_definitions[stub_type].template_size;
3592 for (i = 0; i < template_size; i++)
3594 switch (template[i].type)
3613 *stub_template = template;
3615 if (stub_template_size)
3616 *stub_template_size = template_size;
3621 /* As above, but don't actually build the stub. Just bump offset so
3622 we know stub section sizes. */
3625 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3628 struct elf32_arm_stub_hash_entry *stub_entry;
3629 struct elf32_arm_link_hash_table *htab;
3630 const insn_sequence *template;
3631 int template_size, size;
3633 /* Massage our args to the form they really have. */
3634 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3635 htab = (struct elf32_arm_link_hash_table *) in_arg;
3637 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3638 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3640 size = find_stub_size_and_template (stub_entry->stub_type, &template,
3643 stub_entry->stub_size = size;
3644 stub_entry->stub_template = template;
3645 stub_entry->stub_template_size = template_size;
3647 size = (size + 7) & ~7;
3648 stub_entry->stub_sec->size += size;
3653 /* External entry points for sizing and building linker stubs. */
3655 /* Set up various things so that we can make a list of input sections
3656 for each output section included in the link. Returns -1 on error,
3657 0 when no stubs will be needed, and 1 on success. */
3660 elf32_arm_setup_section_lists (bfd *output_bfd,
3661 struct bfd_link_info *info)
3664 unsigned int bfd_count;
3665 int top_id, top_index;
3667 asection **input_list, **list;
3669 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3671 if (! is_elf_hash_table (htab))
3674 /* Count the number of input BFDs and find the top input section id. */
3675 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3677 input_bfd = input_bfd->link_next)
3680 for (section = input_bfd->sections;
3682 section = section->next)
3684 if (top_id < section->id)
3685 top_id = section->id;
3688 htab->bfd_count = bfd_count;
3690 amt = sizeof (struct map_stub) * (top_id + 1);
3691 htab->stub_group = bfd_zmalloc (amt);
3692 if (htab->stub_group == NULL)
3695 /* We can't use output_bfd->section_count here to find the top output
3696 section index as some sections may have been removed, and
3697 _bfd_strip_section_from_output doesn't renumber the indices. */
3698 for (section = output_bfd->sections, top_index = 0;
3700 section = section->next)
3702 if (top_index < section->index)
3703 top_index = section->index;
3706 htab->top_index = top_index;
3707 amt = sizeof (asection *) * (top_index + 1);
3708 input_list = bfd_malloc (amt);
3709 htab->input_list = input_list;
3710 if (input_list == NULL)
3713 /* For sections we aren't interested in, mark their entries with a
3714 value we can check later. */
3715 list = input_list + top_index;
3717 *list = bfd_abs_section_ptr;
3718 while (list-- != input_list);
3720 for (section = output_bfd->sections;
3722 section = section->next)
3724 if ((section->flags & SEC_CODE) != 0)
3725 input_list[section->index] = NULL;
3731 /* The linker repeatedly calls this function for each input section,
3732 in the order that input sections are linked into output sections.
3733 Build lists of input sections to determine groupings between which
3734 we may insert linker stubs. */
3737 elf32_arm_next_input_section (struct bfd_link_info *info,
3740 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3742 if (isec->output_section->index <= htab->top_index)
3744 asection **list = htab->input_list + isec->output_section->index;
3746 if (*list != bfd_abs_section_ptr)
3748 /* Steal the link_sec pointer for our list. */
3749 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3750 /* This happens to make the list in reverse order,
3751 which we reverse later. */
3752 PREV_SEC (isec) = *list;
3758 /* See whether we can group stub sections together. Grouping stub
3759 sections may result in fewer stubs. More importantly, we need to
3760 put all .init* and .fini* stubs at the end of the .init or
3761 .fini output sections respectively, because glibc splits the
3762 _init and _fini functions into multiple parts. Putting a stub in
3763 the middle of a function is not a good idea. */
3766 group_sections (struct elf32_arm_link_hash_table *htab,
3767 bfd_size_type stub_group_size,
3768 bfd_boolean stubs_always_after_branch)
3770 asection **list = htab->input_list;
3774 asection *tail = *list;
3777 if (tail == bfd_abs_section_ptr)
3780 /* Reverse the list: we must avoid placing stubs at the
3781 beginning of the section because the beginning of the text
3782 section may be required for an interrupt vector in bare metal
3784 #define NEXT_SEC PREV_SEC
3786 while (tail != NULL)
3788 /* Pop from tail. */
3789 asection *item = tail;
3790 tail = PREV_SEC (item);
3793 NEXT_SEC (item) = head;
3797 while (head != NULL)
3801 bfd_vma stub_group_start = head->output_offset;
3802 bfd_vma end_of_next;
3805 while (NEXT_SEC (curr) != NULL)
3807 next = NEXT_SEC (curr);
3808 end_of_next = next->output_offset + next->size;
3809 if (end_of_next - stub_group_start >= stub_group_size)
3810 /* End of NEXT is too far from start, so stop. */
3812 /* Add NEXT to the group. */
3816 /* OK, the size from the start to the start of CURR is less
3817 than stub_group_size and thus can be handled by one stub
3818 section. (Or the head section is itself larger than
3819 stub_group_size, in which case we may be toast.)
3820 We should really be keeping track of the total size of
3821 stubs added here, as stubs contribute to the final output
3825 next = NEXT_SEC (head);
3826 /* Set up this stub group. */
3827 htab->stub_group[head->id].link_sec = curr;
3829 while (head != curr && (head = next) != NULL);
3831 /* But wait, there's more! Input sections up to stub_group_size
3832 bytes after the stub section can be handled by it too. */
3833 if (!stubs_always_after_branch)
3835 stub_group_start = curr->output_offset + curr->size;
3837 while (next != NULL)
3839 end_of_next = next->output_offset + next->size;
3840 if (end_of_next - stub_group_start >= stub_group_size)
3841 /* End of NEXT is too far from stubs, so stop. */
3843 /* Add NEXT to the stub group. */
3845 next = NEXT_SEC (head);
3846 htab->stub_group[head->id].link_sec = curr;
3852 while (list++ != htab->input_list + htab->top_index);
3854 free (htab->input_list);
3859 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3863 a8_reloc_compare (const void *a, const void *b)
3865 const struct a8_erratum_reloc *ra = a, *rb = b;
3867 if (ra->from < rb->from)
3869 else if (ra->from > rb->from)
3875 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3876 const char *, char **);
3878 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3879 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3880 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3884 cortex_a8_erratum_scan (bfd *input_bfd,
3885 struct bfd_link_info *info,
3886 struct a8_erratum_fix **a8_fixes_p,
3887 unsigned int *num_a8_fixes_p,
3888 unsigned int *a8_fix_table_size_p,
3889 struct a8_erratum_reloc *a8_relocs,
3890 unsigned int num_a8_relocs)
3893 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3894 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3895 unsigned int num_a8_fixes = *num_a8_fixes_p;
3896 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3898 for (section = input_bfd->sections;
3900 section = section->next)
3902 bfd_byte *contents = NULL;
3903 struct _arm_elf_section_data *sec_data;
3907 if (elf_section_type (section) != SHT_PROGBITS
3908 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3909 || (section->flags & SEC_EXCLUDE) != 0
3910 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3911 || (section->output_section == bfd_abs_section_ptr))
3914 base_vma = section->output_section->vma + section->output_offset;
3916 if (elf_section_data (section)->this_hdr.contents != NULL)
3917 contents = elf_section_data (section)->this_hdr.contents;
3918 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3921 sec_data = elf32_arm_section_data (section);
3923 for (span = 0; span < sec_data->mapcount; span++)
3925 unsigned int span_start = sec_data->map[span].vma;
3926 unsigned int span_end = (span == sec_data->mapcount - 1)
3927 ? section->size : sec_data->map[span + 1].vma;
3929 char span_type = sec_data->map[span].type;
3930 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3932 if (span_type != 't')
3935 /* Span is entirely within a single 4KB region: skip scanning. */
3936 if (((base_vma + span_start) & ~0xfff)
3937 == ((base_vma + span_end) & ~0xfff))
3940 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3942 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3943 * The branch target is in the same 4KB region as the
3944 first half of the branch.
3945 * The instruction before the branch is a 32-bit
3946 length non-branch instruction. */
3947 for (i = span_start; i < span_end;)
3949 unsigned int insn = bfd_getl16 (&contents[i]);
3950 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3951 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3953 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3958 /* Load the rest of the insn (in manual-friendly order). */
3959 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3961 /* Encoding T4: B<c>.W. */
3962 is_b = (insn & 0xf800d000) == 0xf0009000;
3963 /* Encoding T1: BL<c>.W. */
3964 is_bl = (insn & 0xf800d000) == 0xf000d000;
3965 /* Encoding T2: BLX<c>.W. */
3966 is_blx = (insn & 0xf800d000) == 0xf000c000;
3967 /* Encoding T3: B<c>.W (not permitted in IT block). */
3968 is_bcc = (insn & 0xf800d000) == 0xf0008000
3969 && (insn & 0x07f00000) != 0x03800000;
3972 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
3974 if (((base_vma + i) & 0xfff) == 0xffe
3978 && ! last_was_branch)
3980 bfd_signed_vma offset;
3981 bfd_boolean force_target_arm = FALSE;
3982 bfd_boolean force_target_thumb = FALSE;
3984 enum elf32_arm_stub_type stub_type = arm_stub_none;
3985 struct a8_erratum_reloc key, *found;
3987 key.from = base_vma + i;
3988 found = bsearch (&key, a8_relocs, num_a8_relocs,
3989 sizeof (struct a8_erratum_reloc),
3994 char *error_message = NULL;
3995 struct elf_link_hash_entry *entry;
3997 /* We don't care about the error returned from this
3998 function, only if there is glue or not. */
3999 entry = find_thumb_glue (info, found->sym_name,
4003 found->non_a8_stub = TRUE;
4005 if (found->r_type == R_ARM_THM_CALL
4006 && found->st_type != STT_ARM_TFUNC)
4007 force_target_arm = TRUE;
4008 else if (found->r_type == R_ARM_THM_CALL
4009 && found->st_type == STT_ARM_TFUNC)
4010 force_target_thumb = TRUE;
4013 /* Check if we have an offending branch instruction. */
4015 if (found && found->non_a8_stub)
4016 /* We've already made a stub for this instruction, e.g.
4017 it's a long branch or a Thumb->ARM stub. Assume that
4018 stub will suffice to work around the A8 erratum (see
4019 setting of always_after_branch above). */
4023 offset = (insn & 0x7ff) << 1;
4024 offset |= (insn & 0x3f0000) >> 4;
4025 offset |= (insn & 0x2000) ? 0x40000 : 0;
4026 offset |= (insn & 0x800) ? 0x80000 : 0;
4027 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4028 if (offset & 0x100000)
4029 offset |= ~ ((bfd_signed_vma) 0xfffff);
4030 stub_type = arm_stub_a8_veneer_b_cond;
4032 else if (is_b || is_bl || is_blx)
4034 int s = (insn & 0x4000000) != 0;
4035 int j1 = (insn & 0x2000) != 0;
4036 int j2 = (insn & 0x800) != 0;
4040 offset = (insn & 0x7ff) << 1;
4041 offset |= (insn & 0x3ff0000) >> 4;
4045 if (offset & 0x1000000)
4046 offset |= ~ ((bfd_signed_vma) 0xffffff);
4049 offset &= ~ ((bfd_signed_vma) 3);
4051 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4052 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4055 if (stub_type != arm_stub_none)
4057 bfd_vma pc_for_insn = base_vma + i + 4;
4059 /* The original instruction is a BL, but the target is
4060 an ARM instruction. If we were not making a stub,
4061 the BL would have been converted to a BLX. Use the
4062 BLX stub instead in that case. */
4063 if (htab->use_blx && force_target_arm
4064 && stub_type == arm_stub_a8_veneer_bl)
4066 stub_type = arm_stub_a8_veneer_blx;
4070 /* Conversely, if the original instruction was
4071 BLX but the target is Thumb mode, use the BL
4073 else if (force_target_thumb
4074 && stub_type == arm_stub_a8_veneer_blx)
4076 stub_type = arm_stub_a8_veneer_bl;
4082 pc_for_insn &= ~ ((bfd_vma) 3);
4084 /* If we found a relocation, use the proper destination,
4085 not the offset in the (unrelocated) instruction.
4086 Note this is always done if we switched the stub type
4090 (bfd_signed_vma) (found->destination - pc_for_insn);
4092 target = pc_for_insn + offset;
4094 /* The BLX stub is ARM-mode code. Adjust the offset to
4095 take the different PC value (+8 instead of +4) into
4097 if (stub_type == arm_stub_a8_veneer_blx)
4100 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4104 if (num_a8_fixes == a8_fix_table_size)
4106 a8_fix_table_size *= 2;
4107 a8_fixes = bfd_realloc (a8_fixes,
4108 sizeof (struct a8_erratum_fix)
4109 * a8_fix_table_size);
4112 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4113 if (stub_name != NULL)
4114 sprintf (stub_name, "%x:%x", section->id, i);
4116 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4117 a8_fixes[num_a8_fixes].section = section;
4118 a8_fixes[num_a8_fixes].offset = i;
4119 a8_fixes[num_a8_fixes].addend = offset;
4120 a8_fixes[num_a8_fixes].orig_insn = insn;
4121 a8_fixes[num_a8_fixes].stub_name = stub_name;
4122 a8_fixes[num_a8_fixes].stub_type = stub_type;
4129 i += insn_32bit ? 4 : 2;
4130 last_was_32bit = insn_32bit;
4131 last_was_branch = is_32bit_branch;
4135 if (elf_section_data (section)->this_hdr.contents == NULL)
4139 *a8_fixes_p = a8_fixes;
4140 *num_a8_fixes_p = num_a8_fixes;
4141 *a8_fix_table_size_p = a8_fix_table_size;
4146 /* Determine and set the size of the stub section for a final link.
4148 The basic idea here is to examine all the relocations looking for
4149 PC-relative calls to a target that is unreachable with a "bl"
4153 elf32_arm_size_stubs (bfd *output_bfd,
4155 struct bfd_link_info *info,
4156 bfd_signed_vma group_size,
4157 asection * (*add_stub_section) (const char *, asection *),
4158 void (*layout_sections_again) (void))
4160 bfd_size_type stub_group_size;
4161 bfd_boolean stubs_always_after_branch;
4162 bfd_boolean stub_changed = 0;
4163 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4164 struct a8_erratum_fix *a8_fixes = NULL;
4165 unsigned int num_a8_fixes = 0, prev_num_a8_fixes = 0, a8_fix_table_size = 10;
4166 struct a8_erratum_reloc *a8_relocs = NULL;
4167 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4169 if (htab->fix_cortex_a8)
4171 a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
4172 * a8_fix_table_size);
4173 a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
4174 * a8_reloc_table_size);
4177 /* Propagate mach to stub bfd, because it may not have been
4178 finalized when we created stub_bfd. */
4179 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4180 bfd_get_mach (output_bfd));
4182 /* Stash our params away. */
4183 htab->stub_bfd = stub_bfd;
4184 htab->add_stub_section = add_stub_section;
4185 htab->layout_sections_again = layout_sections_again;
4186 stubs_always_after_branch = group_size < 0;
4188 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4189 as the first half of a 32-bit branch straddling two 4K pages. This is a
4190 crude way of enforcing that. */
4191 if (htab->fix_cortex_a8)
4192 stubs_always_after_branch = 1;
4195 stub_group_size = -group_size;
4197 stub_group_size = group_size;
4199 if (stub_group_size == 1)
4201 /* Default values. */
4202 /* Thumb branch range is +-4MB has to be used as the default
4203 maximum size (a given section can contain both ARM and Thumb
4204 code, so the worst case has to be taken into account).
4206 This value is 24K less than that, which allows for 2025
4207 12-byte stubs. If we exceed that, then we will fail to link.
4208 The user will have to relink with an explicit group size
4210 stub_group_size = 4170000;
4213 group_sections (htab, stub_group_size, stubs_always_after_branch);
4218 unsigned int bfd_indx;
4223 for (input_bfd = info->input_bfds, bfd_indx = 0;
4225 input_bfd = input_bfd->link_next, bfd_indx++)
4227 Elf_Internal_Shdr *symtab_hdr;
4229 Elf_Internal_Sym *local_syms = NULL;
4233 /* We'll need the symbol table in a second. */
4234 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4235 if (symtab_hdr->sh_info == 0)
4238 /* Walk over each section attached to the input bfd. */
4239 for (section = input_bfd->sections;
4241 section = section->next)
4243 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4245 /* If there aren't any relocs, then there's nothing more
4247 if ((section->flags & SEC_RELOC) == 0
4248 || section->reloc_count == 0
4249 || (section->flags & SEC_CODE) == 0)
4252 /* If this section is a link-once section that will be
4253 discarded, then don't create any stubs. */
4254 if (section->output_section == NULL
4255 || section->output_section->owner != output_bfd)
4258 /* Get the relocs. */
4260 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4261 NULL, info->keep_memory);
4262 if (internal_relocs == NULL)
4263 goto error_ret_free_local;
4265 /* Now examine each relocation. */
4266 irela = internal_relocs;
4267 irelaend = irela + section->reloc_count;
4268 for (; irela < irelaend; irela++)
4270 unsigned int r_type, r_indx;
4271 enum elf32_arm_stub_type stub_type;
4272 struct elf32_arm_stub_hash_entry *stub_entry;
4275 bfd_vma destination;
4276 struct elf32_arm_link_hash_entry *hash;
4277 const char *sym_name;
4279 const asection *id_sec;
4280 unsigned char st_type;
4281 bfd_boolean created_stub = FALSE;
4283 r_type = ELF32_R_TYPE (irela->r_info);
4284 r_indx = ELF32_R_SYM (irela->r_info);
4286 if (r_type >= (unsigned int) R_ARM_max)
4288 bfd_set_error (bfd_error_bad_value);
4289 error_ret_free_internal:
4290 if (elf_section_data (section)->relocs == NULL)
4291 free (internal_relocs);
4292 goto error_ret_free_local;
4295 /* Only look for stubs on branch instructions. */
4296 if ((r_type != (unsigned int) R_ARM_CALL)
4297 && (r_type != (unsigned int) R_ARM_THM_CALL)
4298 && (r_type != (unsigned int) R_ARM_JUMP24)
4299 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4300 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4301 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4302 && (r_type != (unsigned int) R_ARM_PLT32))
4305 /* Now determine the call target, its name, value,
4312 if (r_indx < symtab_hdr->sh_info)
4314 /* It's a local symbol. */
4315 Elf_Internal_Sym *sym;
4316 Elf_Internal_Shdr *hdr;
4318 if (local_syms == NULL)
4321 = (Elf_Internal_Sym *) symtab_hdr->contents;
4322 if (local_syms == NULL)
4324 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4325 symtab_hdr->sh_info, 0,
4327 if (local_syms == NULL)
4328 goto error_ret_free_internal;
4331 sym = local_syms + r_indx;
4332 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4333 sym_sec = hdr->bfd_section;
4334 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4335 sym_value = sym->st_value;
4336 destination = (sym_value + irela->r_addend
4337 + sym_sec->output_offset
4338 + sym_sec->output_section->vma);
4339 st_type = ELF_ST_TYPE (sym->st_info);
4341 = bfd_elf_string_from_elf_section (input_bfd,
4342 symtab_hdr->sh_link,
4347 /* It's an external symbol. */
4350 e_indx = r_indx - symtab_hdr->sh_info;
4351 hash = ((struct elf32_arm_link_hash_entry *)
4352 elf_sym_hashes (input_bfd)[e_indx]);
4354 while (hash->root.root.type == bfd_link_hash_indirect
4355 || hash->root.root.type == bfd_link_hash_warning)
4356 hash = ((struct elf32_arm_link_hash_entry *)
4357 hash->root.root.u.i.link);
4359 if (hash->root.root.type == bfd_link_hash_defined
4360 || hash->root.root.type == bfd_link_hash_defweak)
4362 sym_sec = hash->root.root.u.def.section;
4363 sym_value = hash->root.root.u.def.value;
4365 struct elf32_arm_link_hash_table *globals =
4366 elf32_arm_hash_table (info);
4368 /* For a destination in a shared library,
4369 use the PLT stub as target address to
4370 decide whether a branch stub is
4372 if (globals->splt != NULL && hash != NULL
4373 && hash->root.plt.offset != (bfd_vma) -1)
4375 sym_sec = globals->splt;
4376 sym_value = hash->root.plt.offset;
4377 if (sym_sec->output_section != NULL)
4378 destination = (sym_value
4379 + sym_sec->output_offset
4380 + sym_sec->output_section->vma);
4382 else if (sym_sec->output_section != NULL)
4383 destination = (sym_value + irela->r_addend
4384 + sym_sec->output_offset
4385 + sym_sec->output_section->vma);
4387 else if ((hash->root.root.type == bfd_link_hash_undefined)
4388 || (hash->root.root.type == bfd_link_hash_undefweak))
4390 /* For a shared library, use the PLT stub as
4391 target address to decide whether a long
4392 branch stub is needed.
4393 For absolute code, they cannot be handled. */
4394 struct elf32_arm_link_hash_table *globals =
4395 elf32_arm_hash_table (info);
4397 if (globals->splt != NULL && hash != NULL
4398 && hash->root.plt.offset != (bfd_vma) -1)
4400 sym_sec = globals->splt;
4401 sym_value = hash->root.plt.offset;
4402 if (sym_sec->output_section != NULL)
4403 destination = (sym_value
4404 + sym_sec->output_offset
4405 + sym_sec->output_section->vma);
4412 bfd_set_error (bfd_error_bad_value);
4413 goto error_ret_free_internal;
4415 st_type = ELF_ST_TYPE (hash->root.type);
4416 sym_name = hash->root.root.root.string;
4421 /* Determine what (if any) linker stub is needed. */
4422 stub_type = arm_type_of_stub (info, section, irela,
4424 destination, sym_sec,
4425 input_bfd, sym_name);
4426 if (stub_type == arm_stub_none)
4429 /* Support for grouping stub sections. */
4430 id_sec = htab->stub_group[section->id].link_sec;
4432 /* Get the name of this stub. */
4433 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4436 goto error_ret_free_internal;
4438 /* We've either created a stub for this reloc already,
4439 or we are about to. */
4440 created_stub = TRUE;
4442 stub_entry = arm_stub_hash_lookup
4443 (&htab->stub_hash_table, stub_name,
4445 if (stub_entry != NULL)
4447 /* The proper stub has already been created. */
4452 stub_entry = elf32_arm_add_stub (stub_name, section,
4454 if (stub_entry == NULL)
4457 goto error_ret_free_internal;
4460 stub_entry->target_value = sym_value;
4461 stub_entry->target_section = sym_sec;
4462 stub_entry->stub_type = stub_type;
4463 stub_entry->h = hash;
4464 stub_entry->st_type = st_type;
4466 if (sym_name == NULL)
4467 sym_name = "unnamed";
4468 stub_entry->output_name
4469 = bfd_alloc (htab->stub_bfd,
4470 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4471 + strlen (sym_name));
4472 if (stub_entry->output_name == NULL)
4475 goto error_ret_free_internal;
4478 /* For historical reasons, use the existing names for
4479 ARM-to-Thumb and Thumb-to-ARM stubs. */
4480 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4481 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4482 && st_type != STT_ARM_TFUNC)
4483 sprintf (stub_entry->output_name,
4484 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4485 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4486 || (r_type == (unsigned int) R_ARM_JUMP24))
4487 && st_type == STT_ARM_TFUNC)
4488 sprintf (stub_entry->output_name,
4489 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4491 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4494 stub_changed = TRUE;
4498 /* Look for relocations which might trigger Cortex-A8
4500 if (htab->fix_cortex_a8
4501 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4502 || r_type == (unsigned int) R_ARM_THM_JUMP19
4503 || r_type == (unsigned int) R_ARM_THM_CALL
4504 || r_type == (unsigned int) R_ARM_THM_XPC22))
4506 bfd_vma from = section->output_section->vma
4507 + section->output_offset
4510 if ((from & 0xfff) == 0xffe)
4512 /* Found a candidate. Note we haven't checked the
4513 destination is within 4K here: if we do so (and
4514 don't create an entry in a8_relocs) we can't tell
4515 that a branch should have been relocated when
4517 if (num_a8_relocs == a8_reloc_table_size)
4519 a8_reloc_table_size *= 2;
4520 a8_relocs = bfd_realloc (a8_relocs,
4521 sizeof (struct a8_erratum_reloc)
4522 * a8_reloc_table_size);
4525 a8_relocs[num_a8_relocs].from = from;
4526 a8_relocs[num_a8_relocs].destination = destination;
4527 a8_relocs[num_a8_relocs].r_type = r_type;
4528 a8_relocs[num_a8_relocs].st_type = st_type;
4529 a8_relocs[num_a8_relocs].sym_name = sym_name;
4530 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4537 /* We're done with the internal relocs, free them. */
4538 if (elf_section_data (section)->relocs == NULL)
4539 free (internal_relocs);
4542 if (htab->fix_cortex_a8)
4544 /* Sort relocs which might apply to Cortex-A8 erratum. */
4545 qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc),
4548 /* Scan for branches which might trigger Cortex-A8 erratum. */
4549 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4550 &num_a8_fixes, &a8_fix_table_size,
4551 a8_relocs, num_a8_relocs) != 0)
4552 goto error_ret_free_local;
4556 if (htab->fix_cortex_a8 && num_a8_fixes != prev_num_a8_fixes)
4557 stub_changed = TRUE;
4562 /* OK, we've added some stubs. Find out the new size of the
4564 for (stub_sec = htab->stub_bfd->sections;
4566 stub_sec = stub_sec->next)
4568 /* Ignore non-stub sections. */
4569 if (!strstr (stub_sec->name, STUB_SUFFIX))
4575 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4577 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4578 if (htab->fix_cortex_a8)
4579 for (i = 0; i < num_a8_fixes; i++)
4581 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4582 a8_fixes[i].section, htab);
4584 if (stub_sec == NULL)
4585 goto error_ret_free_local;
4588 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4593 /* Ask the linker to do its stuff. */
4594 (*htab->layout_sections_again) ();
4595 stub_changed = FALSE;
4596 prev_num_a8_fixes = num_a8_fixes;
4599 /* Add stubs for Cortex-A8 erratum fixes now. */
4600 if (htab->fix_cortex_a8)
4602 for (i = 0; i < num_a8_fixes; i++)
4604 struct elf32_arm_stub_hash_entry *stub_entry;
4605 char *stub_name = a8_fixes[i].stub_name;
4606 asection *section = a8_fixes[i].section;
4607 unsigned int section_id = a8_fixes[i].section->id;
4608 asection *link_sec = htab->stub_group[section_id].link_sec;
4609 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4610 const insn_sequence *template;
4611 int template_size, size = 0;
4613 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4615 if (stub_entry == NULL)
4617 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4623 stub_entry->stub_sec = stub_sec;
4624 stub_entry->stub_offset = 0;
4625 stub_entry->id_sec = link_sec;
4626 stub_entry->stub_type = a8_fixes[i].stub_type;
4627 stub_entry->target_section = a8_fixes[i].section;
4628 stub_entry->target_value = a8_fixes[i].offset;
4629 stub_entry->target_addend = a8_fixes[i].addend;
4630 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4631 stub_entry->st_type = STT_ARM_TFUNC;
4633 size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
4636 stub_entry->stub_size = size;
4637 stub_entry->stub_template = template;
4638 stub_entry->stub_template_size = template_size;
4641 /* Stash the Cortex-A8 erratum fix array for use later in
4642 elf32_arm_write_section(). */
4643 htab->a8_erratum_fixes = a8_fixes;
4644 htab->num_a8_erratum_fixes = num_a8_fixes;
4648 htab->a8_erratum_fixes = NULL;
4649 htab->num_a8_erratum_fixes = 0;
4653 error_ret_free_local:
4657 /* Build all the stubs associated with the current output file. The
4658 stubs are kept in a hash table attached to the main linker hash
4659 table. We also set up the .plt entries for statically linked PIC
4660 functions here. This function is called via arm_elf_finish in the
4664 elf32_arm_build_stubs (struct bfd_link_info *info)
4667 struct bfd_hash_table *table;
4668 struct elf32_arm_link_hash_table *htab;
4670 htab = elf32_arm_hash_table (info);
4672 for (stub_sec = htab->stub_bfd->sections;
4674 stub_sec = stub_sec->next)
4678 /* Ignore non-stub sections. */
4679 if (!strstr (stub_sec->name, STUB_SUFFIX))
4682 /* Allocate memory to hold the linker stubs. */
4683 size = stub_sec->size;
4684 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4685 if (stub_sec->contents == NULL && size != 0)
4690 /* Build the stubs as directed by the stub hash table. */
4691 table = &htab->stub_hash_table;
4692 bfd_hash_traverse (table, arm_build_one_stub, info);
4697 /* Locate the Thumb encoded calling stub for NAME. */
4699 static struct elf_link_hash_entry *
4700 find_thumb_glue (struct bfd_link_info *link_info,
4702 char **error_message)
4705 struct elf_link_hash_entry *hash;
4706 struct elf32_arm_link_hash_table *hash_table;
4708 /* We need a pointer to the armelf specific hash table. */
4709 hash_table = elf32_arm_hash_table (link_info);
4711 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4712 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4714 BFD_ASSERT (tmp_name);
4716 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4718 hash = elf_link_hash_lookup
4719 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4722 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4723 tmp_name, name) == -1)
4724 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4731 /* Locate the ARM encoded calling stub for NAME. */
4733 static struct elf_link_hash_entry *
4734 find_arm_glue (struct bfd_link_info *link_info,
4736 char **error_message)
4739 struct elf_link_hash_entry *myh;
4740 struct elf32_arm_link_hash_table *hash_table;
4742 /* We need a pointer to the elfarm specific hash table. */
4743 hash_table = elf32_arm_hash_table (link_info);
4745 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4746 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4748 BFD_ASSERT (tmp_name);
4750 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4752 myh = elf_link_hash_lookup
4753 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4756 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4757 tmp_name, name) == -1)
4758 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4765 /* ARM->Thumb glue (static images):
4769 ldr r12, __func_addr
4772 .word func @ behave as if you saw a ARM_32 reloc.
4779 .word func @ behave as if you saw a ARM_32 reloc.
4781 (relocatable images)
4784 ldr r12, __func_offset
4790 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4791 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4792 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4793 static const insn32 a2t3_func_addr_insn = 0x00000001;
4795 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4796 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4797 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4799 #define ARM2THUMB_PIC_GLUE_SIZE 16
4800 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4801 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4802 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4804 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4808 __func_from_thumb: __func_from_thumb:
4810 nop ldr r6, __func_addr
4820 #define THUMB2ARM_GLUE_SIZE 8
4821 static const insn16 t2a1_bx_pc_insn = 0x4778;
4822 static const insn16 t2a2_noop_insn = 0x46c0;
4823 static const insn32 t2a3_b_insn = 0xea000000;
4825 #define VFP11_ERRATUM_VENEER_SIZE 8
4827 #define ARM_BX_VENEER_SIZE 12
4828 static const insn32 armbx1_tst_insn = 0xe3100001;
4829 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4830 static const insn32 armbx3_bx_insn = 0xe12fff10;
4832 #ifndef ELFARM_NABI_C_INCLUDED
4834 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4837 bfd_byte * contents;
4841 /* Do not include empty glue sections in the output. */
4844 s = bfd_get_section_by_name (abfd, name);
4846 s->flags |= SEC_EXCLUDE;
4851 BFD_ASSERT (abfd != NULL);
4853 s = bfd_get_section_by_name (abfd, name);
4854 BFD_ASSERT (s != NULL);
4856 contents = bfd_alloc (abfd, size);
4858 BFD_ASSERT (s->size == size);
4859 s->contents = contents;
4863 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4865 struct elf32_arm_link_hash_table * globals;
4867 globals = elf32_arm_hash_table (info);
4868 BFD_ASSERT (globals != NULL);
4870 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4871 globals->arm_glue_size,
4872 ARM2THUMB_GLUE_SECTION_NAME);
4874 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4875 globals->thumb_glue_size,
4876 THUMB2ARM_GLUE_SECTION_NAME);
4878 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4879 globals->vfp11_erratum_glue_size,
4880 VFP11_ERRATUM_VENEER_SECTION_NAME);
4882 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4883 globals->bx_glue_size,
4884 ARM_BX_GLUE_SECTION_NAME);
4889 /* Allocate space and symbols for calling a Thumb function from Arm mode.
4890 returns the symbol identifying the stub. */
4892 static struct elf_link_hash_entry *
4893 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4894 struct elf_link_hash_entry * h)
4896 const char * name = h->root.root.string;
4899 struct elf_link_hash_entry * myh;
4900 struct bfd_link_hash_entry * bh;
4901 struct elf32_arm_link_hash_table * globals;
4905 globals = elf32_arm_hash_table (link_info);
4907 BFD_ASSERT (globals != NULL);
4908 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4910 s = bfd_get_section_by_name
4911 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4913 BFD_ASSERT (s != NULL);
4915 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4917 BFD_ASSERT (tmp_name);
4919 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4921 myh = elf_link_hash_lookup
4922 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
4926 /* We've already seen this guy. */
4931 /* The only trick here is using hash_table->arm_glue_size as the value.
4932 Even though the section isn't allocated yet, this is where we will be
4933 putting it. The +1 on the value marks that the stub has not been
4934 output yet - not that it is a Thumb function. */
4936 val = globals->arm_glue_size + 1;
4937 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4938 tmp_name, BSF_GLOBAL, s, val,
4939 NULL, TRUE, FALSE, &bh);
4941 myh = (struct elf_link_hash_entry *) bh;
4942 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4943 myh->forced_local = 1;
4947 if (link_info->shared || globals->root.is_relocatable_executable
4948 || globals->pic_veneer)
4949 size = ARM2THUMB_PIC_GLUE_SIZE;
4950 else if (globals->use_blx)
4951 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
4953 size = ARM2THUMB_STATIC_GLUE_SIZE;
4956 globals->arm_glue_size += size;
4961 /* Allocate space for ARMv4 BX veneers. */
4964 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
4967 struct elf32_arm_link_hash_table *globals;
4969 struct elf_link_hash_entry *myh;
4970 struct bfd_link_hash_entry *bh;
4973 /* BX PC does not need a veneer. */
4977 globals = elf32_arm_hash_table (link_info);
4979 BFD_ASSERT (globals != NULL);
4980 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4982 /* Check if this veneer has already been allocated. */
4983 if (globals->bx_glue_offset[reg])
4986 s = bfd_get_section_by_name
4987 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
4989 BFD_ASSERT (s != NULL);
4991 /* Add symbol for veneer. */
4992 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
4994 BFD_ASSERT (tmp_name);
4996 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
4998 myh = elf_link_hash_lookup
4999 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5001 BFD_ASSERT (myh == NULL);
5004 val = globals->bx_glue_size;
5005 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5006 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5007 NULL, TRUE, FALSE, &bh);
5009 myh = (struct elf_link_hash_entry *) bh;
5010 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5011 myh->forced_local = 1;
5013 s->size += ARM_BX_VENEER_SIZE;
5014 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5015 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5019 /* Add an entry to the code/data map for section SEC. */
5022 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5024 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5025 unsigned int newidx;
5027 if (sec_data->map == NULL)
5029 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5030 sec_data->mapcount = 0;
5031 sec_data->mapsize = 1;
5034 newidx = sec_data->mapcount++;
5036 if (sec_data->mapcount > sec_data->mapsize)
5038 sec_data->mapsize *= 2;
5039 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5040 * sizeof (elf32_arm_section_map));
5045 sec_data->map[newidx].vma = vma;
5046 sec_data->map[newidx].type = type;
5051 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5052 veneers are handled for now. */
5055 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5056 elf32_vfp11_erratum_list *branch,
5058 asection *branch_sec,
5059 unsigned int offset)
5062 struct elf32_arm_link_hash_table *hash_table;
5064 struct elf_link_hash_entry *myh;
5065 struct bfd_link_hash_entry *bh;
5067 struct _arm_elf_section_data *sec_data;
5069 elf32_vfp11_erratum_list *newerr;
5071 hash_table = elf32_arm_hash_table (link_info);
5073 BFD_ASSERT (hash_table != NULL);
5074 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5076 s = bfd_get_section_by_name
5077 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5079 sec_data = elf32_arm_section_data (s);
5081 BFD_ASSERT (s != NULL);
5083 tmp_name = bfd_malloc ((bfd_size_type) strlen
5084 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5086 BFD_ASSERT (tmp_name);
5088 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5089 hash_table->num_vfp11_fixes);
5091 myh = elf_link_hash_lookup
5092 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5094 BFD_ASSERT (myh == NULL);
5097 val = hash_table->vfp11_erratum_glue_size;
5098 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5099 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5100 NULL, TRUE, FALSE, &bh);
5102 myh = (struct elf_link_hash_entry *) bh;
5103 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5104 myh->forced_local = 1;
5106 /* Link veneer back to calling location. */
5107 errcount = ++(sec_data->erratumcount);
5108 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5110 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5112 newerr->u.v.branch = branch;
5113 newerr->u.v.id = hash_table->num_vfp11_fixes;
5114 branch->u.b.veneer = newerr;
5116 newerr->next = sec_data->erratumlist;
5117 sec_data->erratumlist = newerr;
5119 /* A symbol for the return from the veneer. */
5120 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5121 hash_table->num_vfp11_fixes);
5123 myh = elf_link_hash_lookup
5124 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5131 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5132 branch_sec, val, NULL, TRUE, FALSE, &bh);
5134 myh = (struct elf_link_hash_entry *) bh;
5135 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5136 myh->forced_local = 1;
5140 /* Generate a mapping symbol for the veneer section, and explicitly add an
5141 entry for that symbol to the code/data map for the section. */
5142 if (hash_table->vfp11_erratum_glue_size == 0)
5145 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5146 ever requires this erratum fix. */
5147 _bfd_generic_link_add_one_symbol (link_info,
5148 hash_table->bfd_of_glue_owner, "$a",
5149 BSF_LOCAL, s, 0, NULL,
5152 myh = (struct elf_link_hash_entry *) bh;
5153 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5154 myh->forced_local = 1;
5156 /* The elf32_arm_init_maps function only cares about symbols from input
5157 BFDs. We must make a note of this generated mapping symbol
5158 ourselves so that code byteswapping works properly in
5159 elf32_arm_write_section. */
5160 elf32_arm_section_map_add (s, 'a', 0);
5163 s->size += VFP11_ERRATUM_VENEER_SIZE;
5164 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5165 hash_table->num_vfp11_fixes++;
5167 /* The offset of the veneer. */
5171 #define ARM_GLUE_SECTION_FLAGS \
5172 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5173 | SEC_READONLY | SEC_LINKER_CREATED)
5175 /* Create a fake section for use by the ARM backend of the linker. */
5178 arm_make_glue_section (bfd * abfd, const char * name)
5182 sec = bfd_get_section_by_name (abfd, name);
5187 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5190 || !bfd_set_section_alignment (abfd, sec, 2))
5193 /* Set the gc mark to prevent the section from being removed by garbage
5194 collection, despite the fact that no relocs refer to this section. */
5200 /* Add the glue sections to ABFD. This function is called from the
5201 linker scripts in ld/emultempl/{armelf}.em. */
5204 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5205 struct bfd_link_info *info)
5207 /* If we are only performing a partial
5208 link do not bother adding the glue. */
5209 if (info->relocatable)
5212 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5213 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5214 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5215 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5218 /* Select a BFD to be used to hold the sections used by the glue code.
5219 This function is called from the linker scripts in ld/emultempl/
5223 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5225 struct elf32_arm_link_hash_table *globals;
5227 /* If we are only performing a partial link
5228 do not bother getting a bfd to hold the glue. */
5229 if (info->relocatable)
5232 /* Make sure we don't attach the glue sections to a dynamic object. */
5233 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5235 globals = elf32_arm_hash_table (info);
5237 BFD_ASSERT (globals != NULL);
5239 if (globals->bfd_of_glue_owner != NULL)
5242 /* Save the bfd for later use. */
5243 globals->bfd_of_glue_owner = abfd;
5249 check_use_blx (struct elf32_arm_link_hash_table *globals)
5251 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5253 globals->use_blx = 1;
5257 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5258 struct bfd_link_info *link_info)
5260 Elf_Internal_Shdr *symtab_hdr;
5261 Elf_Internal_Rela *internal_relocs = NULL;
5262 Elf_Internal_Rela *irel, *irelend;
5263 bfd_byte *contents = NULL;
5266 struct elf32_arm_link_hash_table *globals;
5268 /* If we are only performing a partial link do not bother
5269 to construct any glue. */
5270 if (link_info->relocatable)
5273 /* Here we have a bfd that is to be included on the link. We have a
5274 hook to do reloc rummaging, before section sizes are nailed down. */
5275 globals = elf32_arm_hash_table (link_info);
5277 BFD_ASSERT (globals != NULL);
5279 check_use_blx (globals);
5281 if (globals->byteswap_code && !bfd_big_endian (abfd))
5283 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5288 /* PR 5398: If we have not decided to include any loadable sections in
5289 the output then we will not have a glue owner bfd. This is OK, it
5290 just means that there is nothing else for us to do here. */
5291 if (globals->bfd_of_glue_owner == NULL)
5294 /* Rummage around all the relocs and map the glue vectors. */
5295 sec = abfd->sections;
5300 for (; sec != NULL; sec = sec->next)
5302 if (sec->reloc_count == 0)
5305 if ((sec->flags & SEC_EXCLUDE) != 0)
5308 symtab_hdr = & elf_symtab_hdr (abfd);
5310 /* Load the relocs. */
5312 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5314 if (internal_relocs == NULL)
5317 irelend = internal_relocs + sec->reloc_count;
5318 for (irel = internal_relocs; irel < irelend; irel++)
5321 unsigned long r_index;
5323 struct elf_link_hash_entry *h;
5325 r_type = ELF32_R_TYPE (irel->r_info);
5326 r_index = ELF32_R_SYM (irel->r_info);
5328 /* These are the only relocation types we care about. */
5329 if ( r_type != R_ARM_PC24
5330 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5333 /* Get the section contents if we haven't done so already. */
5334 if (contents == NULL)
5336 /* Get cached copy if it exists. */
5337 if (elf_section_data (sec)->this_hdr.contents != NULL)
5338 contents = elf_section_data (sec)->this_hdr.contents;
5341 /* Go get them off disk. */
5342 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5347 if (r_type == R_ARM_V4BX)
5351 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5352 record_arm_bx_glue (link_info, reg);
5356 /* If the relocation is not against a symbol it cannot concern us. */
5359 /* We don't care about local symbols. */
5360 if (r_index < symtab_hdr->sh_info)
5363 /* This is an external symbol. */
5364 r_index -= symtab_hdr->sh_info;
5365 h = (struct elf_link_hash_entry *)
5366 elf_sym_hashes (abfd)[r_index];
5368 /* If the relocation is against a static symbol it must be within
5369 the current section and so cannot be a cross ARM/Thumb relocation. */
5373 /* If the call will go through a PLT entry then we do not need
5375 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5381 /* This one is a call from arm code. We need to look up
5382 the target of the call. If it is a thumb target, we
5384 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5385 record_arm_to_thumb_glue (link_info, h);
5393 if (contents != NULL
5394 && elf_section_data (sec)->this_hdr.contents != contents)
5398 if (internal_relocs != NULL
5399 && elf_section_data (sec)->relocs != internal_relocs)
5400 free (internal_relocs);
5401 internal_relocs = NULL;
5407 if (contents != NULL
5408 && elf_section_data (sec)->this_hdr.contents != contents)
5410 if (internal_relocs != NULL
5411 && elf_section_data (sec)->relocs != internal_relocs)
5412 free (internal_relocs);
5419 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5422 bfd_elf32_arm_init_maps (bfd *abfd)
5424 Elf_Internal_Sym *isymbuf;
5425 Elf_Internal_Shdr *hdr;
5426 unsigned int i, localsyms;
5428 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5429 if (! is_arm_elf (abfd))
5432 if ((abfd->flags & DYNAMIC) != 0)
5435 hdr = & elf_symtab_hdr (abfd);
5436 localsyms = hdr->sh_info;
5438 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5439 should contain the number of local symbols, which should come before any
5440 global symbols. Mapping symbols are always local. */
5441 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5444 /* No internal symbols read? Skip this BFD. */
5445 if (isymbuf == NULL)
5448 for (i = 0; i < localsyms; i++)
5450 Elf_Internal_Sym *isym = &isymbuf[i];
5451 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5455 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5457 name = bfd_elf_string_from_elf_section (abfd,
5458 hdr->sh_link, isym->st_name);
5460 if (bfd_is_arm_special_symbol_name (name,
5461 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5462 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5468 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5469 say what they wanted. */
5472 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5474 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5475 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5477 if (globals->fix_cortex_a8 == -1)
5479 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5480 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5481 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5482 || out_attr[Tag_CPU_arch_profile].i == 0))
5483 globals->fix_cortex_a8 = 1;
5485 globals->fix_cortex_a8 = 0;
5491 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5493 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5494 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5496 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5497 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5499 switch (globals->vfp11_fix)
5501 case BFD_ARM_VFP11_FIX_DEFAULT:
5502 case BFD_ARM_VFP11_FIX_NONE:
5503 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5507 /* Give a warning, but do as the user requests anyway. */
5508 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5509 "workaround is not necessary for target architecture"), obfd);
5512 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5513 /* For earlier architectures, we might need the workaround, but do not
5514 enable it by default. If users is running with broken hardware, they
5515 must enable the erratum fix explicitly. */
5516 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5520 enum bfd_arm_vfp11_pipe
5528 /* Return a VFP register number. This is encoded as RX:X for single-precision
5529 registers, or X:RX for double-precision registers, where RX is the group of
5530 four bits in the instruction encoding and X is the single extension bit.
5531 RX and X fields are specified using their lowest (starting) bit. The return
5534 0...31: single-precision registers s0...s31
5535 32...63: double-precision registers d0...d31.
5537 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5538 encounter VFP3 instructions, so we allow the full range for DP registers. */
5541 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5545 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5547 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5550 /* Set bits in *WMASK according to a register number REG as encoded by
5551 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5554 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5559 *wmask |= 3 << ((reg - 32) * 2);
5562 /* Return TRUE if WMASK overwrites anything in REGS. */
5565 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5569 for (i = 0; i < numregs; i++)
5571 unsigned int reg = regs[i];
5573 if (reg < 32 && (wmask & (1 << reg)) != 0)
5581 if ((wmask & (3 << (reg * 2))) != 0)
5588 /* In this function, we're interested in two things: finding input registers
5589 for VFP data-processing instructions, and finding the set of registers which
5590 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5591 hold the written set, so FLDM etc. are easy to deal with (we're only
5592 interested in 32 SP registers or 16 dp registers, due to the VFP version
5593 implemented by the chip in question). DP registers are marked by setting
5594 both SP registers in the write mask). */
5596 static enum bfd_arm_vfp11_pipe
5597 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5600 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5601 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5603 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5606 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5607 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5609 pqrs = ((insn & 0x00800000) >> 20)
5610 | ((insn & 0x00300000) >> 19)
5611 | ((insn & 0x00000040) >> 6);
5615 case 0: /* fmac[sd]. */
5616 case 1: /* fnmac[sd]. */
5617 case 2: /* fmsc[sd]. */
5618 case 3: /* fnmsc[sd]. */
5620 bfd_arm_vfp11_write_mask (destmask, fd);
5622 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5627 case 4: /* fmul[sd]. */
5628 case 5: /* fnmul[sd]. */
5629 case 6: /* fadd[sd]. */
5630 case 7: /* fsub[sd]. */
5634 case 8: /* fdiv[sd]. */
5637 bfd_arm_vfp11_write_mask (destmask, fd);
5638 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5643 case 15: /* extended opcode. */
5645 unsigned int extn = ((insn >> 15) & 0x1e)
5646 | ((insn >> 7) & 1);
5650 case 0: /* fcpy[sd]. */
5651 case 1: /* fabs[sd]. */
5652 case 2: /* fneg[sd]. */
5653 case 8: /* fcmp[sd]. */
5654 case 9: /* fcmpe[sd]. */
5655 case 10: /* fcmpz[sd]. */
5656 case 11: /* fcmpez[sd]. */
5657 case 16: /* fuito[sd]. */
5658 case 17: /* fsito[sd]. */
5659 case 24: /* ftoui[sd]. */
5660 case 25: /* ftouiz[sd]. */
5661 case 26: /* ftosi[sd]. */
5662 case 27: /* ftosiz[sd]. */
5663 /* These instructions will not bounce due to underflow. */
5668 case 3: /* fsqrt[sd]. */
5669 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5670 registers to cause the erratum in previous instructions. */
5671 bfd_arm_vfp11_write_mask (destmask, fd);
5675 case 15: /* fcvt{ds,sd}. */
5679 bfd_arm_vfp11_write_mask (destmask, fd);
5681 /* Only FCVTSD can underflow. */
5682 if ((insn & 0x100) != 0)
5701 /* Two-register transfer. */
5702 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5704 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5706 if ((insn & 0x100000) == 0)
5709 bfd_arm_vfp11_write_mask (destmask, fm);
5712 bfd_arm_vfp11_write_mask (destmask, fm);
5713 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5719 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5721 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5722 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5726 case 0: /* Two-reg transfer. We should catch these above. */
5729 case 2: /* fldm[sdx]. */
5733 unsigned int i, offset = insn & 0xff;
5738 for (i = fd; i < fd + offset; i++)
5739 bfd_arm_vfp11_write_mask (destmask, i);
5743 case 4: /* fld[sd]. */
5745 bfd_arm_vfp11_write_mask (destmask, fd);
5754 /* Single-register transfer. Note L==0. */
5755 else if ((insn & 0x0f100e10) == 0x0e000a10)
5757 unsigned int opcode = (insn >> 21) & 7;
5758 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5762 case 0: /* fmsr/fmdlr. */
5763 case 1: /* fmdhr. */
5764 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5765 destination register. I don't know if this is exactly right,
5766 but it is the conservative choice. */
5767 bfd_arm_vfp11_write_mask (destmask, fn);
5781 static int elf32_arm_compare_mapping (const void * a, const void * b);
5784 /* Look for potentially-troublesome code sequences which might trigger the
5785 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5786 (available from ARM) for details of the erratum. A short version is
5787 described in ld.texinfo. */
5790 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5793 bfd_byte *contents = NULL;
5795 int regs[3], numregs = 0;
5796 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5797 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5799 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5800 The states transition as follows:
5802 0 -> 1 (vector) or 0 -> 2 (scalar)
5803 A VFP FMAC-pipeline instruction has been seen. Fill
5804 regs[0]..regs[numregs-1] with its input operands. Remember this
5805 instruction in 'first_fmac'.
5808 Any instruction, except for a VFP instruction which overwrites
5813 A VFP instruction has been seen which overwrites any of regs[*].
5814 We must make a veneer! Reset state to 0 before examining next
5818 If we fail to match anything in state 2, reset to state 0 and reset
5819 the instruction pointer to the instruction after 'first_fmac'.
5821 If the VFP11 vector mode is in use, there must be at least two unrelated
5822 instructions between anti-dependent VFP11 instructions to properly avoid
5823 triggering the erratum, hence the use of the extra state 1. */
5825 /* If we are only performing a partial link do not bother
5826 to construct any glue. */
5827 if (link_info->relocatable)
5830 /* Skip if this bfd does not correspond to an ELF image. */
5831 if (! is_arm_elf (abfd))
5834 /* We should have chosen a fix type by the time we get here. */
5835 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5837 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5840 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5841 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5844 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5846 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5847 struct _arm_elf_section_data *sec_data;
5849 /* If we don't have executable progbits, we're not interested in this
5850 section. Also skip if section is to be excluded. */
5851 if (elf_section_type (sec) != SHT_PROGBITS
5852 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5853 || (sec->flags & SEC_EXCLUDE) != 0
5854 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5855 || sec->output_section == bfd_abs_section_ptr
5856 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5859 sec_data = elf32_arm_section_data (sec);
5861 if (sec_data->mapcount == 0)
5864 if (elf_section_data (sec)->this_hdr.contents != NULL)
5865 contents = elf_section_data (sec)->this_hdr.contents;
5866 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5869 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5870 elf32_arm_compare_mapping);
5872 for (span = 0; span < sec_data->mapcount; span++)
5874 unsigned int span_start = sec_data->map[span].vma;
5875 unsigned int span_end = (span == sec_data->mapcount - 1)
5876 ? sec->size : sec_data->map[span + 1].vma;
5877 char span_type = sec_data->map[span].type;
5879 /* FIXME: Only ARM mode is supported at present. We may need to
5880 support Thumb-2 mode also at some point. */
5881 if (span_type != 'a')
5884 for (i = span_start; i < span_end;)
5886 unsigned int next_i = i + 4;
5887 unsigned int insn = bfd_big_endian (abfd)
5888 ? (contents[i] << 24)
5889 | (contents[i + 1] << 16)
5890 | (contents[i + 2] << 8)
5892 : (contents[i + 3] << 24)
5893 | (contents[i + 2] << 16)
5894 | (contents[i + 1] << 8)
5896 unsigned int writemask = 0;
5897 enum bfd_arm_vfp11_pipe pipe;
5902 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5904 /* I'm assuming the VFP11 erratum can trigger with denorm
5905 operands on either the FMAC or the DS pipeline. This might
5906 lead to slightly overenthusiastic veneer insertion. */
5907 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
5909 state = use_vector ? 1 : 2;
5911 veneer_of_insn = insn;
5917 int other_regs[3], other_numregs;
5918 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5921 if (pipe != VFP11_BAD
5922 && bfd_arm_vfp11_antidependency (writemask, regs,
5932 int other_regs[3], other_numregs;
5933 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5936 if (pipe != VFP11_BAD
5937 && bfd_arm_vfp11_antidependency (writemask, regs,
5943 next_i = first_fmac + 4;
5949 abort (); /* Should be unreachable. */
5954 elf32_vfp11_erratum_list *newerr
5955 = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5958 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
5960 newerr->u.b.vfp_insn = veneer_of_insn;
5965 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
5972 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
5977 newerr->next = sec_data->erratumlist;
5978 sec_data->erratumlist = newerr;
5987 if (contents != NULL
5988 && elf_section_data (sec)->this_hdr.contents != contents)
5996 if (contents != NULL
5997 && elf_section_data (sec)->this_hdr.contents != contents)
6003 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6004 after sections have been laid out, using specially-named symbols. */
6007 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6008 struct bfd_link_info *link_info)
6011 struct elf32_arm_link_hash_table *globals;
6014 if (link_info->relocatable)
6017 /* Skip if this bfd does not correspond to an ELF image. */
6018 if (! is_arm_elf (abfd))
6021 globals = elf32_arm_hash_table (link_info);
6023 tmp_name = bfd_malloc ((bfd_size_type) strlen
6024 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6026 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6028 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6029 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6031 for (; errnode != NULL; errnode = errnode->next)
6033 struct elf_link_hash_entry *myh;
6036 switch (errnode->type)
6038 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6039 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6040 /* Find veneer symbol. */
6041 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6042 errnode->u.b.veneer->u.v.id);
6044 myh = elf_link_hash_lookup
6045 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6048 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6049 "`%s'"), abfd, tmp_name);
6051 vma = myh->root.u.def.section->output_section->vma
6052 + myh->root.u.def.section->output_offset
6053 + myh->root.u.def.value;
6055 errnode->u.b.veneer->vma = vma;
6058 case VFP11_ERRATUM_ARM_VENEER:
6059 case VFP11_ERRATUM_THUMB_VENEER:
6060 /* Find return location. */
6061 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6064 myh = elf_link_hash_lookup
6065 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6068 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6069 "`%s'"), abfd, tmp_name);
6071 vma = myh->root.u.def.section->output_section->vma
6072 + myh->root.u.def.section->output_offset
6073 + myh->root.u.def.value;
6075 errnode->u.v.branch->vma = vma;
6088 /* Set target relocation values needed during linking. */
6091 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6092 struct bfd_link_info *link_info,
6094 char * target2_type,
6097 bfd_arm_vfp11_fix vfp11_fix,
6098 int no_enum_warn, int no_wchar_warn,
6099 int pic_veneer, int fix_cortex_a8)
6101 struct elf32_arm_link_hash_table *globals;
6103 globals = elf32_arm_hash_table (link_info);
6105 globals->target1_is_rel = target1_is_rel;
6106 if (strcmp (target2_type, "rel") == 0)
6107 globals->target2_reloc = R_ARM_REL32;
6108 else if (strcmp (target2_type, "abs") == 0)
6109 globals->target2_reloc = R_ARM_ABS32;
6110 else if (strcmp (target2_type, "got-rel") == 0)
6111 globals->target2_reloc = R_ARM_GOT_PREL;
6114 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6117 globals->fix_v4bx = fix_v4bx;
6118 globals->use_blx |= use_blx;
6119 globals->vfp11_fix = vfp11_fix;
6120 globals->pic_veneer = pic_veneer;
6121 globals->fix_cortex_a8 = fix_cortex_a8;
6123 BFD_ASSERT (is_arm_elf (output_bfd));
6124 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6125 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6128 /* Replace the target offset of a Thumb bl or b.w instruction. */
6131 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6137 BFD_ASSERT ((offset & 1) == 0);
6139 upper = bfd_get_16 (abfd, insn);
6140 lower = bfd_get_16 (abfd, insn + 2);
6141 reloc_sign = (offset < 0) ? 1 : 0;
6142 upper = (upper & ~(bfd_vma) 0x7ff)
6143 | ((offset >> 12) & 0x3ff)
6144 | (reloc_sign << 10);
6145 lower = (lower & ~(bfd_vma) 0x2fff)
6146 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6147 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6148 | ((offset >> 1) & 0x7ff);
6149 bfd_put_16 (abfd, upper, insn);
6150 bfd_put_16 (abfd, lower, insn + 2);
6153 /* Thumb code calling an ARM function. */
6156 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6160 asection * input_section,
6161 bfd_byte * hit_data,
6164 bfd_signed_vma addend,
6166 char **error_message)
6170 long int ret_offset;
6171 struct elf_link_hash_entry * myh;
6172 struct elf32_arm_link_hash_table * globals;
6174 myh = find_thumb_glue (info, name, error_message);
6178 globals = elf32_arm_hash_table (info);
6180 BFD_ASSERT (globals != NULL);
6181 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6183 my_offset = myh->root.u.def.value;
6185 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6186 THUMB2ARM_GLUE_SECTION_NAME);
6188 BFD_ASSERT (s != NULL);
6189 BFD_ASSERT (s->contents != NULL);
6190 BFD_ASSERT (s->output_section != NULL);
6192 if ((my_offset & 0x01) == 0x01)
6195 && sym_sec->owner != NULL
6196 && !INTERWORK_FLAG (sym_sec->owner))
6198 (*_bfd_error_handler)
6199 (_("%B(%s): warning: interworking not enabled.\n"
6200 " first occurrence: %B: thumb call to arm"),
6201 sym_sec->owner, input_bfd, name);
6207 myh->root.u.def.value = my_offset;
6209 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6210 s->contents + my_offset);
6212 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6213 s->contents + my_offset + 2);
6216 /* Address of destination of the stub. */
6217 ((bfd_signed_vma) val)
6219 /* Offset from the start of the current section
6220 to the start of the stubs. */
6222 /* Offset of the start of this stub from the start of the stubs. */
6224 /* Address of the start of the current section. */
6225 + s->output_section->vma)
6226 /* The branch instruction is 4 bytes into the stub. */
6228 /* ARM branches work from the pc of the instruction + 8. */
6231 put_arm_insn (globals, output_bfd,
6232 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6233 s->contents + my_offset + 4);
6236 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6238 /* Now go back and fix up the original BL insn to point to here. */
6240 /* Address of where the stub is located. */
6241 (s->output_section->vma + s->output_offset + my_offset)
6242 /* Address of where the BL is located. */
6243 - (input_section->output_section->vma + input_section->output_offset
6245 /* Addend in the relocation. */
6247 /* Biassing for PC-relative addressing. */
6250 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6255 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6257 static struct elf_link_hash_entry *
6258 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6265 char ** error_message)
6268 long int ret_offset;
6269 struct elf_link_hash_entry * myh;
6270 struct elf32_arm_link_hash_table * globals;
6272 myh = find_arm_glue (info, name, error_message);
6276 globals = elf32_arm_hash_table (info);
6278 BFD_ASSERT (globals != NULL);
6279 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6281 my_offset = myh->root.u.def.value;
6283 if ((my_offset & 0x01) == 0x01)
6286 && sym_sec->owner != NULL
6287 && !INTERWORK_FLAG (sym_sec->owner))
6289 (*_bfd_error_handler)
6290 (_("%B(%s): warning: interworking not enabled.\n"
6291 " first occurrence: %B: arm call to thumb"),
6292 sym_sec->owner, input_bfd, name);
6296 myh->root.u.def.value = my_offset;
6298 if (info->shared || globals->root.is_relocatable_executable
6299 || globals->pic_veneer)
6301 /* For relocatable objects we can't use absolute addresses,
6302 so construct the address from a relative offset. */
6303 /* TODO: If the offset is small it's probably worth
6304 constructing the address with adds. */
6305 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6306 s->contents + my_offset);
6307 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6308 s->contents + my_offset + 4);
6309 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6310 s->contents + my_offset + 8);
6311 /* Adjust the offset by 4 for the position of the add,
6312 and 8 for the pipeline offset. */
6313 ret_offset = (val - (s->output_offset
6314 + s->output_section->vma
6317 bfd_put_32 (output_bfd, ret_offset,
6318 s->contents + my_offset + 12);
6320 else if (globals->use_blx)
6322 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6323 s->contents + my_offset);
6325 /* It's a thumb address. Add the low order bit. */
6326 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6327 s->contents + my_offset + 4);
6331 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6332 s->contents + my_offset);
6334 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6335 s->contents + my_offset + 4);
6337 /* It's a thumb address. Add the low order bit. */
6338 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6339 s->contents + my_offset + 8);
6345 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6350 /* Arm code calling a Thumb function. */
6353 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6357 asection * input_section,
6358 bfd_byte * hit_data,
6361 bfd_signed_vma addend,
6363 char **error_message)
6365 unsigned long int tmp;
6368 long int ret_offset;
6369 struct elf_link_hash_entry * myh;
6370 struct elf32_arm_link_hash_table * globals;
6372 globals = elf32_arm_hash_table (info);
6374 BFD_ASSERT (globals != NULL);
6375 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6377 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6378 ARM2THUMB_GLUE_SECTION_NAME);
6379 BFD_ASSERT (s != NULL);
6380 BFD_ASSERT (s->contents != NULL);
6381 BFD_ASSERT (s->output_section != NULL);
6383 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6384 sym_sec, val, s, error_message);
6388 my_offset = myh->root.u.def.value;
6389 tmp = bfd_get_32 (input_bfd, hit_data);
6390 tmp = tmp & 0xFF000000;
6392 /* Somehow these are both 4 too far, so subtract 8. */
6393 ret_offset = (s->output_offset
6395 + s->output_section->vma
6396 - (input_section->output_offset
6397 + input_section->output_section->vma
6401 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6403 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6408 /* Populate Arm stub for an exported Thumb function. */
6411 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6413 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6415 struct elf_link_hash_entry * myh;
6416 struct elf32_arm_link_hash_entry *eh;
6417 struct elf32_arm_link_hash_table * globals;
6420 char *error_message;
6422 eh = elf32_arm_hash_entry (h);
6423 /* Allocate stubs for exported Thumb functions on v4t. */
6424 if (eh->export_glue == NULL)
6427 globals = elf32_arm_hash_table (info);
6429 BFD_ASSERT (globals != NULL);
6430 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6432 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6433 ARM2THUMB_GLUE_SECTION_NAME);
6434 BFD_ASSERT (s != NULL);
6435 BFD_ASSERT (s->contents != NULL);
6436 BFD_ASSERT (s->output_section != NULL);
6438 sec = eh->export_glue->root.u.def.section;
6440 BFD_ASSERT (sec->output_section != NULL);
6442 val = eh->export_glue->root.u.def.value + sec->output_offset
6443 + sec->output_section->vma;
6445 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6446 h->root.u.def.section->owner,
6447 globals->obfd, sec, val, s,
6453 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
6456 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6461 struct elf32_arm_link_hash_table *globals;
6463 globals = elf32_arm_hash_table (info);
6465 BFD_ASSERT (globals != NULL);
6466 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6468 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6469 ARM_BX_GLUE_SECTION_NAME);
6470 BFD_ASSERT (s != NULL);
6471 BFD_ASSERT (s->contents != NULL);
6472 BFD_ASSERT (s->output_section != NULL);
6474 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6476 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6478 if ((globals->bx_glue_offset[reg] & 1) == 0)
6480 p = s->contents + glue_addr;
6481 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6482 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6483 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6484 globals->bx_glue_offset[reg] |= 1;
6487 return glue_addr + s->output_section->vma + s->output_offset;
6490 /* Generate Arm stubs for exported Thumb symbols. */
6492 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6493 struct bfd_link_info *link_info)
6495 struct elf32_arm_link_hash_table * globals;
6497 if (link_info == NULL)
6498 /* Ignore this if we are not called by the ELF backend linker. */
6501 globals = elf32_arm_hash_table (link_info);
6502 /* If blx is available then exported Thumb symbols are OK and there is
6504 if (globals->use_blx)
6507 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6511 /* Some relocations map to different relocations depending on the
6512 target. Return the real relocation. */
6515 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6521 if (globals->target1_is_rel)
6527 return globals->target2_reloc;
6534 /* Return the base VMA address which should be subtracted from real addresses
6535 when resolving @dtpoff relocation.
6536 This is PT_TLS segment p_vaddr. */
6539 dtpoff_base (struct bfd_link_info *info)
6541 /* If tls_sec is NULL, we should have signalled an error already. */
6542 if (elf_hash_table (info)->tls_sec == NULL)
6544 return elf_hash_table (info)->tls_sec->vma;
6547 /* Return the relocation value for @tpoff relocation
6548 if STT_TLS virtual address is ADDRESS. */
6551 tpoff (struct bfd_link_info *info, bfd_vma address)
6553 struct elf_link_hash_table *htab = elf_hash_table (info);
6556 /* If tls_sec is NULL, we should have signalled an error already. */
6557 if (htab->tls_sec == NULL)
6559 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6560 return address - htab->tls_sec->vma + base;
6563 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6564 VALUE is the relocation value. */
6566 static bfd_reloc_status_type
6567 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6570 return bfd_reloc_overflow;
6572 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6573 bfd_put_32 (abfd, value, data);
6574 return bfd_reloc_ok;
6577 /* For a given value of n, calculate the value of G_n as required to
6578 deal with group relocations. We return it in the form of an
6579 encoded constant-and-rotation, together with the final residual. If n is
6580 specified as less than zero, then final_residual is filled with the
6581 input value and no further action is performed. */
6584 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6588 bfd_vma encoded_g_n = 0;
6589 bfd_vma residual = value; /* Also known as Y_n. */
6591 for (current_n = 0; current_n <= n; current_n++)
6595 /* Calculate which part of the value to mask. */
6602 /* Determine the most significant bit in the residual and
6603 align the resulting value to a 2-bit boundary. */
6604 for (msb = 30; msb >= 0; msb -= 2)
6605 if (residual & (3 << msb))
6608 /* The desired shift is now (msb - 6), or zero, whichever
6615 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6616 g_n = residual & (0xff << shift);
6617 encoded_g_n = (g_n >> shift)
6618 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6620 /* Calculate the residual for the next time around. */
6624 *final_residual = residual;
6629 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6630 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6633 identify_add_or_sub (bfd_vma insn)
6635 int opcode = insn & 0x1e00000;
6637 if (opcode == 1 << 23) /* ADD */
6640 if (opcode == 1 << 22) /* SUB */
6646 /* Perform a relocation as part of a final link. */
6648 static bfd_reloc_status_type
6649 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6652 asection * input_section,
6653 bfd_byte * contents,
6654 Elf_Internal_Rela * rel,
6656 struct bfd_link_info * info,
6658 const char * sym_name,
6660 struct elf_link_hash_entry * h,
6661 bfd_boolean * unresolved_reloc_p,
6662 char ** error_message)
6664 unsigned long r_type = howto->type;
6665 unsigned long r_symndx;
6666 bfd_byte * hit_data = contents + rel->r_offset;
6667 bfd * dynobj = NULL;
6668 Elf_Internal_Shdr * symtab_hdr;
6669 struct elf_link_hash_entry ** sym_hashes;
6670 bfd_vma * local_got_offsets;
6671 asection * sgot = NULL;
6672 asection * splt = NULL;
6673 asection * sreloc = NULL;
6675 bfd_signed_vma signed_addend;
6676 struct elf32_arm_link_hash_table * globals;
6678 globals = elf32_arm_hash_table (info);
6680 BFD_ASSERT (is_arm_elf (input_bfd));
6682 /* Some relocation types map to different relocations depending on the
6683 target. We pick the right one here. */
6684 r_type = arm_real_reloc_type (globals, r_type);
6685 if (r_type != howto->type)
6686 howto = elf32_arm_howto_from_type (r_type);
6688 /* If the start address has been set, then set the EF_ARM_HASENTRY
6689 flag. Setting this more than once is redundant, but the cost is
6690 not too high, and it keeps the code simple.
6692 The test is done here, rather than somewhere else, because the
6693 start address is only set just before the final link commences.
6695 Note - if the user deliberately sets a start address of 0, the
6696 flag will not be set. */
6697 if (bfd_get_start_address (output_bfd) != 0)
6698 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6700 dynobj = elf_hash_table (info)->dynobj;
6703 sgot = bfd_get_section_by_name (dynobj, ".got");
6704 splt = bfd_get_section_by_name (dynobj, ".plt");
6706 symtab_hdr = & elf_symtab_hdr (input_bfd);
6707 sym_hashes = elf_sym_hashes (input_bfd);
6708 local_got_offsets = elf_local_got_offsets (input_bfd);
6709 r_symndx = ELF32_R_SYM (rel->r_info);
6711 if (globals->use_rel)
6713 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6715 if (addend & ((howto->src_mask + 1) >> 1))
6718 signed_addend &= ~ howto->src_mask;
6719 signed_addend |= addend;
6722 signed_addend = addend;
6725 addend = signed_addend = rel->r_addend;
6730 /* We don't need to find a value for this symbol. It's just a
6732 *unresolved_reloc_p = FALSE;
6733 return bfd_reloc_ok;
6736 if (!globals->vxworks_p)
6737 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6741 case R_ARM_ABS32_NOI:
6743 case R_ARM_REL32_NOI:
6749 /* Handle relocations which should use the PLT entry. ABS32/REL32
6750 will use the symbol's value, which may point to a PLT entry, but we
6751 don't need to handle that here. If we created a PLT entry, all
6752 branches in this object should go to it, except if the PLT is too
6753 far away, in which case a long branch stub should be inserted. */
6754 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6755 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6756 && r_type != R_ARM_CALL
6757 && r_type != R_ARM_JUMP24
6758 && r_type != R_ARM_PLT32)
6761 && h->plt.offset != (bfd_vma) -1)
6763 /* If we've created a .plt section, and assigned a PLT entry to
6764 this function, it should not be known to bind locally. If
6765 it were, we would have cleared the PLT entry. */
6766 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6768 value = (splt->output_section->vma
6769 + splt->output_offset
6771 *unresolved_reloc_p = FALSE;
6772 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6773 contents, rel->r_offset, value,
6777 /* When generating a shared object or relocatable executable, these
6778 relocations are copied into the output file to be resolved at
6780 if ((info->shared || globals->root.is_relocatable_executable)
6781 && (input_section->flags & SEC_ALLOC)
6782 && !(elf32_arm_hash_table (info)->vxworks_p
6783 && strcmp (input_section->output_section->name,
6785 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6786 || !SYMBOL_CALLS_LOCAL (info, h))
6788 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6789 || h->root.type != bfd_link_hash_undefweak)
6790 && r_type != R_ARM_PC24
6791 && r_type != R_ARM_CALL
6792 && r_type != R_ARM_JUMP24
6793 && r_type != R_ARM_PREL31
6794 && r_type != R_ARM_PLT32)
6796 Elf_Internal_Rela outrel;
6798 bfd_boolean skip, relocate;
6800 *unresolved_reloc_p = FALSE;
6804 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6805 ! globals->use_rel);
6808 return bfd_reloc_notsupported;
6814 outrel.r_addend = addend;
6816 _bfd_elf_section_offset (output_bfd, info, input_section,
6818 if (outrel.r_offset == (bfd_vma) -1)
6820 else if (outrel.r_offset == (bfd_vma) -2)
6821 skip = TRUE, relocate = TRUE;
6822 outrel.r_offset += (input_section->output_section->vma
6823 + input_section->output_offset);
6826 memset (&outrel, 0, sizeof outrel);
6831 || !h->def_regular))
6832 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6837 /* This symbol is local, or marked to become local. */
6838 if (sym_flags == STT_ARM_TFUNC)
6840 if (globals->symbian_p)
6844 /* On Symbian OS, the data segment and text segement
6845 can be relocated independently. Therefore, we
6846 must indicate the segment to which this
6847 relocation is relative. The BPABI allows us to
6848 use any symbol in the right segment; we just use
6849 the section symbol as it is convenient. (We
6850 cannot use the symbol given by "h" directly as it
6851 will not appear in the dynamic symbol table.)
6853 Note that the dynamic linker ignores the section
6854 symbol value, so we don't subtract osec->vma
6855 from the emitted reloc addend. */
6857 osec = sym_sec->output_section;
6859 osec = input_section->output_section;
6860 symbol = elf_section_data (osec)->dynindx;
6863 struct elf_link_hash_table *htab = elf_hash_table (info);
6865 if ((osec->flags & SEC_READONLY) == 0
6866 && htab->data_index_section != NULL)
6867 osec = htab->data_index_section;
6869 osec = htab->text_index_section;
6870 symbol = elf_section_data (osec)->dynindx;
6872 BFD_ASSERT (symbol != 0);
6875 /* On SVR4-ish systems, the dynamic loader cannot
6876 relocate the text and data segments independently,
6877 so the symbol does not matter. */
6879 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6880 if (globals->use_rel)
6883 outrel.r_addend += value;
6886 loc = sreloc->contents;
6887 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6888 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6890 /* If this reloc is against an external symbol, we do not want to
6891 fiddle with the addend. Otherwise, we need to include the symbol
6892 value so that it becomes an addend for the dynamic reloc. */
6894 return bfd_reloc_ok;
6896 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6897 contents, rel->r_offset, value,
6900 else switch (r_type)
6903 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6905 case R_ARM_XPC25: /* Arm BLX instruction. */
6908 case R_ARM_PC24: /* Arm B/BL instruction. */
6912 bfd_signed_vma branch_offset;
6913 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6915 if (r_type == R_ARM_XPC25)
6917 /* Check for Arm calling Arm function. */
6918 /* FIXME: Should we translate the instruction into a BL
6919 instruction instead ? */
6920 if (sym_flags != STT_ARM_TFUNC)
6921 (*_bfd_error_handler)
6922 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
6924 h ? h->root.root.string : "(local)");
6926 else if (r_type == R_ARM_PC24)
6928 /* Check for Arm calling Thumb function. */
6929 if (sym_flags == STT_ARM_TFUNC)
6931 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
6932 output_bfd, input_section,
6933 hit_data, sym_sec, rel->r_offset,
6934 signed_addend, value,
6936 return bfd_reloc_ok;
6938 return bfd_reloc_dangerous;
6942 /* Check if a stub has to be inserted because the
6943 destination is too far or we are changing mode. */
6944 if ( r_type == R_ARM_CALL
6945 || r_type == R_ARM_JUMP24
6946 || r_type == R_ARM_PLT32)
6948 /* If the call goes through a PLT entry, make sure to
6949 check distance to the right destination address. */
6950 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
6952 value = (splt->output_section->vma
6953 + splt->output_offset
6955 *unresolved_reloc_p = FALSE;
6958 from = (input_section->output_section->vma
6959 + input_section->output_offset
6961 branch_offset = (bfd_signed_vma)(value - from);
6963 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
6964 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
6965 || ((sym_flags == STT_ARM_TFUNC)
6966 && (((r_type == R_ARM_CALL) && !globals->use_blx)
6967 || (r_type == R_ARM_JUMP24)
6968 || (r_type == R_ARM_PLT32) ))
6971 /* The target is out of reach, so redirect the
6972 branch to the local stub for this function. */
6974 stub_entry = elf32_arm_get_stub_entry (input_section,
6977 if (stub_entry != NULL)
6978 value = (stub_entry->stub_offset
6979 + stub_entry->stub_sec->output_offset
6980 + stub_entry->stub_sec->output_section->vma);
6984 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
6986 S is the address of the symbol in the relocation.
6987 P is address of the instruction being relocated.
6988 A is the addend (extracted from the instruction) in bytes.
6990 S is held in 'value'.
6991 P is the base address of the section containing the
6992 instruction plus the offset of the reloc into that
6994 (input_section->output_section->vma +
6995 input_section->output_offset +
6997 A is the addend, converted into bytes, ie:
7000 Note: None of these operations have knowledge of the pipeline
7001 size of the processor, thus it is up to the assembler to
7002 encode this information into the addend. */
7003 value -= (input_section->output_section->vma
7004 + input_section->output_offset);
7005 value -= rel->r_offset;
7006 if (globals->use_rel)
7007 value += (signed_addend << howto->size);
7009 /* RELA addends do not have to be adjusted by howto->size. */
7010 value += signed_addend;
7012 signed_addend = value;
7013 signed_addend >>= howto->rightshift;
7015 /* A branch to an undefined weak symbol is turned into a jump to
7016 the next instruction unless a PLT entry will be created. */
7017 if (h && h->root.type == bfd_link_hash_undefweak
7018 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7020 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
7025 /* Perform a signed range check. */
7026 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7027 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7028 return bfd_reloc_overflow;
7030 addend = (value & 2);
7032 value = (signed_addend & howto->dst_mask)
7033 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7035 if (r_type == R_ARM_CALL)
7037 /* Set the H bit in the BLX instruction. */
7038 if (sym_flags == STT_ARM_TFUNC)
7043 value &= ~(bfd_vma)(1 << 24);
7046 /* Select the correct instruction (BL or BLX). */
7047 /* Only if we are not handling a BL to a stub. In this
7048 case, mode switching is performed by the stub. */
7049 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7053 value &= ~(bfd_vma)(1 << 28);
7063 if (sym_flags == STT_ARM_TFUNC)
7067 case R_ARM_ABS32_NOI:
7073 if (sym_flags == STT_ARM_TFUNC)
7075 value -= (input_section->output_section->vma
7076 + input_section->output_offset + rel->r_offset);
7079 case R_ARM_REL32_NOI:
7081 value -= (input_section->output_section->vma
7082 + input_section->output_offset + rel->r_offset);
7086 value -= (input_section->output_section->vma
7087 + input_section->output_offset + rel->r_offset);
7088 value += signed_addend;
7089 if (! h || h->root.type != bfd_link_hash_undefweak)
7091 /* Check for overflow. */
7092 if ((value ^ (value >> 1)) & (1 << 30))
7093 return bfd_reloc_overflow;
7095 value &= 0x7fffffff;
7096 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7097 if (sym_flags == STT_ARM_TFUNC)
7102 bfd_put_32 (input_bfd, value, hit_data);
7103 return bfd_reloc_ok;
7107 if ((long) value > 0x7f || (long) value < -0x80)
7108 return bfd_reloc_overflow;
7110 bfd_put_8 (input_bfd, value, hit_data);
7111 return bfd_reloc_ok;
7116 if ((long) value > 0x7fff || (long) value < -0x8000)
7117 return bfd_reloc_overflow;
7119 bfd_put_16 (input_bfd, value, hit_data);
7120 return bfd_reloc_ok;
7122 case R_ARM_THM_ABS5:
7123 /* Support ldr and str instructions for the thumb. */
7124 if (globals->use_rel)
7126 /* Need to refetch addend. */
7127 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7128 /* ??? Need to determine shift amount from operand size. */
7129 addend >>= howto->rightshift;
7133 /* ??? Isn't value unsigned? */
7134 if ((long) value > 0x1f || (long) value < -0x10)
7135 return bfd_reloc_overflow;
7137 /* ??? Value needs to be properly shifted into place first. */
7138 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7139 bfd_put_16 (input_bfd, value, hit_data);
7140 return bfd_reloc_ok;
7142 case R_ARM_THM_ALU_PREL_11_0:
7143 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7146 bfd_signed_vma relocation;
7148 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7149 | bfd_get_16 (input_bfd, hit_data + 2);
7151 if (globals->use_rel)
7153 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7154 | ((insn & (1 << 26)) >> 15);
7155 if (insn & 0xf00000)
7156 signed_addend = -signed_addend;
7159 relocation = value + signed_addend;
7160 relocation -= (input_section->output_section->vma
7161 + input_section->output_offset
7164 value = abs (relocation);
7166 if (value >= 0x1000)
7167 return bfd_reloc_overflow;
7169 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7170 | ((value & 0x700) << 4)
7171 | ((value & 0x800) << 15);
7175 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7176 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7178 return bfd_reloc_ok;
7182 /* PR 10073: This reloc is not generated by the GNU toolchain,
7183 but it is supported for compatibility with third party libraries
7184 generated by other compilers, specifically the ARM/IAR. */
7187 bfd_signed_vma relocation;
7189 insn = bfd_get_16 (input_bfd, hit_data);
7191 if (globals->use_rel)
7192 addend = (insn & 0x00ff) << 2;
7194 relocation = value + addend;
7195 relocation -= (input_section->output_section->vma
7196 + input_section->output_offset
7199 value = abs (relocation);
7201 /* We do not check for overflow of this reloc. Although strictly
7202 speaking this is incorrect, it appears to be necessary in order
7203 to work with IAR generated relocs. Since GCC and GAS do not
7204 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7205 a problem for them. */
7208 insn = (insn & 0xff00) | (value >> 2);
7210 bfd_put_16 (input_bfd, insn, hit_data);
7212 return bfd_reloc_ok;
7215 case R_ARM_THM_PC12:
7216 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7219 bfd_signed_vma relocation;
7221 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7222 | bfd_get_16 (input_bfd, hit_data + 2);
7224 if (globals->use_rel)
7226 signed_addend = insn & 0xfff;
7227 if (!(insn & (1 << 23)))
7228 signed_addend = -signed_addend;
7231 relocation = value + signed_addend;
7232 relocation -= (input_section->output_section->vma
7233 + input_section->output_offset
7236 value = abs (relocation);
7238 if (value >= 0x1000)
7239 return bfd_reloc_overflow;
7241 insn = (insn & 0xff7ff000) | value;
7242 if (relocation >= 0)
7245 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7246 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7248 return bfd_reloc_ok;
7251 case R_ARM_THM_XPC22:
7252 case R_ARM_THM_CALL:
7253 case R_ARM_THM_JUMP24:
7254 /* Thumb BL (branch long instruction). */
7258 bfd_boolean overflow = FALSE;
7259 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7260 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7261 bfd_signed_vma reloc_signed_max;
7262 bfd_signed_vma reloc_signed_min;
7264 bfd_signed_vma signed_check;
7266 int thumb2 = using_thumb2 (globals);
7268 /* A branch to an undefined weak symbol is turned into a jump to
7269 the next instruction unless a PLT entry will be created. */
7270 if (h && h->root.type == bfd_link_hash_undefweak
7271 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7273 bfd_put_16 (input_bfd, 0xe000, hit_data);
7274 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7275 return bfd_reloc_ok;
7278 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7279 with Thumb-1) involving the J1 and J2 bits. */
7280 if (globals->use_rel)
7282 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7283 bfd_vma upper = upper_insn & 0x3ff;
7284 bfd_vma lower = lower_insn & 0x7ff;
7285 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7286 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7287 bfd_vma i1 = j1 ^ s ? 0 : 1;
7288 bfd_vma i2 = j2 ^ s ? 0 : 1;
7290 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7292 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7294 signed_addend = addend;
7297 if (r_type == R_ARM_THM_XPC22)
7299 /* Check for Thumb to Thumb call. */
7300 /* FIXME: Should we translate the instruction into a BL
7301 instruction instead ? */
7302 if (sym_flags == STT_ARM_TFUNC)
7303 (*_bfd_error_handler)
7304 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7306 h ? h->root.root.string : "(local)");
7310 /* If it is not a call to Thumb, assume call to Arm.
7311 If it is a call relative to a section name, then it is not a
7312 function call at all, but rather a long jump. Calls through
7313 the PLT do not require stubs. */
7314 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7315 && (h == NULL || splt == NULL
7316 || h->plt.offset == (bfd_vma) -1))
7318 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7320 /* Convert BL to BLX. */
7321 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7323 else if (( r_type != R_ARM_THM_CALL)
7324 && (r_type != R_ARM_THM_JUMP24))
7326 if (elf32_thumb_to_arm_stub
7327 (info, sym_name, input_bfd, output_bfd, input_section,
7328 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7330 return bfd_reloc_ok;
7332 return bfd_reloc_dangerous;
7335 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7336 && r_type == R_ARM_THM_CALL)
7338 /* Make sure this is a BL. */
7339 lower_insn |= 0x1800;
7343 /* Handle calls via the PLT. */
7344 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7346 value = (splt->output_section->vma
7347 + splt->output_offset
7349 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7351 /* If the Thumb BLX instruction is available, convert the
7352 BL to a BLX instruction to call the ARM-mode PLT entry. */
7353 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7356 /* Target the Thumb stub before the ARM PLT entry. */
7357 value -= PLT_THUMB_STUB_SIZE;
7358 *unresolved_reloc_p = FALSE;
7361 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7363 /* Check if a stub has to be inserted because the destination
7366 bfd_signed_vma branch_offset;
7367 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7369 from = (input_section->output_section->vma
7370 + input_section->output_offset
7372 branch_offset = (bfd_signed_vma)(value - from);
7375 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7376 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7379 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7380 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7381 || ((sym_flags != STT_ARM_TFUNC)
7382 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7383 || r_type == R_ARM_THM_JUMP24)))
7385 /* The target is out of reach or we are changing modes, so
7386 redirect the branch to the local stub for this
7388 stub_entry = elf32_arm_get_stub_entry (input_section,
7391 if (stub_entry != NULL)
7392 value = (stub_entry->stub_offset
7393 + stub_entry->stub_sec->output_offset
7394 + stub_entry->stub_sec->output_section->vma);
7396 /* If this call becomes a call to Arm, force BLX. */
7397 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7400 && !arm_stub_is_thumb (stub_entry->stub_type))
7401 || (sym_flags != STT_ARM_TFUNC))
7402 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7407 relocation = value + signed_addend;
7409 relocation -= (input_section->output_section->vma
7410 + input_section->output_offset
7413 check = relocation >> howto->rightshift;
7415 /* If this is a signed value, the rightshift just dropped
7416 leading 1 bits (assuming twos complement). */
7417 if ((bfd_signed_vma) relocation >= 0)
7418 signed_check = check;
7420 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7422 /* Calculate the permissable maximum and minimum values for
7423 this relocation according to whether we're relocating for
7425 bitsize = howto->bitsize;
7428 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7429 reloc_signed_min = ~reloc_signed_max;
7431 /* Assumes two's complement. */
7432 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7435 if ((lower_insn & 0x5000) == 0x4000)
7436 /* For a BLX instruction, make sure that the relocation is rounded up
7437 to a word boundary. This follows the semantics of the instruction
7438 which specifies that bit 1 of the target address will come from bit
7439 1 of the base address. */
7440 relocation = (relocation + 2) & ~ 3;
7442 /* Put RELOCATION back into the insn. Assumes two's complement.
7443 We use the Thumb-2 encoding, which is safe even if dealing with
7444 a Thumb-1 instruction by virtue of our overflow check above. */
7445 reloc_sign = (signed_check < 0) ? 1 : 0;
7446 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7447 | ((relocation >> 12) & 0x3ff)
7448 | (reloc_sign << 10);
7449 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7450 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7451 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7452 | ((relocation >> 1) & 0x7ff);
7454 /* Put the relocated value back in the object file: */
7455 bfd_put_16 (input_bfd, upper_insn, hit_data);
7456 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7458 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7462 case R_ARM_THM_JUMP19:
7463 /* Thumb32 conditional branch instruction. */
7466 bfd_boolean overflow = FALSE;
7467 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7468 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7469 bfd_signed_vma reloc_signed_max = 0xffffe;
7470 bfd_signed_vma reloc_signed_min = -0x100000;
7471 bfd_signed_vma signed_check;
7473 /* Need to refetch the addend, reconstruct the top three bits,
7474 and squish the two 11 bit pieces together. */
7475 if (globals->use_rel)
7477 bfd_vma S = (upper_insn & 0x0400) >> 10;
7478 bfd_vma upper = (upper_insn & 0x003f);
7479 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7480 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7481 bfd_vma lower = (lower_insn & 0x07ff);
7486 upper -= 0x0100; /* Sign extend. */
7488 addend = (upper << 12) | (lower << 1);
7489 signed_addend = addend;
7492 /* Handle calls via the PLT. */
7493 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7495 value = (splt->output_section->vma
7496 + splt->output_offset
7498 /* Target the Thumb stub before the ARM PLT entry. */
7499 value -= PLT_THUMB_STUB_SIZE;
7500 *unresolved_reloc_p = FALSE;
7503 /* ??? Should handle interworking? GCC might someday try to
7504 use this for tail calls. */
7506 relocation = value + signed_addend;
7507 relocation -= (input_section->output_section->vma
7508 + input_section->output_offset
7510 signed_check = (bfd_signed_vma) relocation;
7512 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7515 /* Put RELOCATION back into the insn. */
7517 bfd_vma S = (relocation & 0x00100000) >> 20;
7518 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7519 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7520 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7521 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7523 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7524 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7527 /* Put the relocated value back in the object file: */
7528 bfd_put_16 (input_bfd, upper_insn, hit_data);
7529 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7531 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7534 case R_ARM_THM_JUMP11:
7535 case R_ARM_THM_JUMP8:
7536 case R_ARM_THM_JUMP6:
7537 /* Thumb B (branch) instruction). */
7539 bfd_signed_vma relocation;
7540 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7541 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7542 bfd_signed_vma signed_check;
7544 /* CZB cannot jump backward. */
7545 if (r_type == R_ARM_THM_JUMP6)
7546 reloc_signed_min = 0;
7548 if (globals->use_rel)
7550 /* Need to refetch addend. */
7551 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7552 if (addend & ((howto->src_mask + 1) >> 1))
7555 signed_addend &= ~ howto->src_mask;
7556 signed_addend |= addend;
7559 signed_addend = addend;
7560 /* The value in the insn has been right shifted. We need to
7561 undo this, so that we can perform the address calculation
7562 in terms of bytes. */
7563 signed_addend <<= howto->rightshift;
7565 relocation = value + signed_addend;
7567 relocation -= (input_section->output_section->vma
7568 + input_section->output_offset
7571 relocation >>= howto->rightshift;
7572 signed_check = relocation;
7574 if (r_type == R_ARM_THM_JUMP6)
7575 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7577 relocation &= howto->dst_mask;
7578 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7580 bfd_put_16 (input_bfd, relocation, hit_data);
7582 /* Assumes two's complement. */
7583 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7584 return bfd_reloc_overflow;
7586 return bfd_reloc_ok;
7589 case R_ARM_ALU_PCREL7_0:
7590 case R_ARM_ALU_PCREL15_8:
7591 case R_ARM_ALU_PCREL23_15:
7596 insn = bfd_get_32 (input_bfd, hit_data);
7597 if (globals->use_rel)
7599 /* Extract the addend. */
7600 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7601 signed_addend = addend;
7603 relocation = value + signed_addend;
7605 relocation -= (input_section->output_section->vma
7606 + input_section->output_offset
7608 insn = (insn & ~0xfff)
7609 | ((howto->bitpos << 7) & 0xf00)
7610 | ((relocation >> howto->bitpos) & 0xff);
7611 bfd_put_32 (input_bfd, value, hit_data);
7613 return bfd_reloc_ok;
7615 case R_ARM_GNU_VTINHERIT:
7616 case R_ARM_GNU_VTENTRY:
7617 return bfd_reloc_ok;
7619 case R_ARM_GOTOFF32:
7620 /* Relocation is relative to the start of the
7621 global offset table. */
7623 BFD_ASSERT (sgot != NULL);
7625 return bfd_reloc_notsupported;
7627 /* If we are addressing a Thumb function, we need to adjust the
7628 address by one, so that attempts to call the function pointer will
7629 correctly interpret it as Thumb code. */
7630 if (sym_flags == STT_ARM_TFUNC)
7633 /* Note that sgot->output_offset is not involved in this
7634 calculation. We always want the start of .got. If we
7635 define _GLOBAL_OFFSET_TABLE in a different way, as is
7636 permitted by the ABI, we might have to change this
7638 value -= sgot->output_section->vma;
7639 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7640 contents, rel->r_offset, value,
7644 /* Use global offset table as symbol value. */
7645 BFD_ASSERT (sgot != NULL);
7648 return bfd_reloc_notsupported;
7650 *unresolved_reloc_p = FALSE;
7651 value = sgot->output_section->vma;
7652 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7653 contents, rel->r_offset, value,
7657 case R_ARM_GOT_PREL:
7658 /* Relocation is to the entry for this symbol in the
7659 global offset table. */
7661 return bfd_reloc_notsupported;
7668 off = h->got.offset;
7669 BFD_ASSERT (off != (bfd_vma) -1);
7670 dyn = globals->root.dynamic_sections_created;
7672 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7674 && SYMBOL_REFERENCES_LOCAL (info, h))
7675 || (ELF_ST_VISIBILITY (h->other)
7676 && h->root.type == bfd_link_hash_undefweak))
7678 /* This is actually a static link, or it is a -Bsymbolic link
7679 and the symbol is defined locally. We must initialize this
7680 entry in the global offset table. Since the offset must
7681 always be a multiple of 4, we use the least significant bit
7682 to record whether we have initialized it already.
7684 When doing a dynamic link, we create a .rel(a).got relocation
7685 entry to initialize the value. This is done in the
7686 finish_dynamic_symbol routine. */
7691 /* If we are addressing a Thumb function, we need to
7692 adjust the address by one, so that attempts to
7693 call the function pointer will correctly
7694 interpret it as Thumb code. */
7695 if (sym_flags == STT_ARM_TFUNC)
7698 bfd_put_32 (output_bfd, value, sgot->contents + off);
7703 *unresolved_reloc_p = FALSE;
7705 value = sgot->output_offset + off;
7711 BFD_ASSERT (local_got_offsets != NULL &&
7712 local_got_offsets[r_symndx] != (bfd_vma) -1);
7714 off = local_got_offsets[r_symndx];
7716 /* The offset must always be a multiple of 4. We use the
7717 least significant bit to record whether we have already
7718 generated the necessary reloc. */
7723 /* If we are addressing a Thumb function, we need to
7724 adjust the address by one, so that attempts to
7725 call the function pointer will correctly
7726 interpret it as Thumb code. */
7727 if (sym_flags == STT_ARM_TFUNC)
7730 if (globals->use_rel)
7731 bfd_put_32 (output_bfd, value, sgot->contents + off);
7736 Elf_Internal_Rela outrel;
7739 srelgot = (bfd_get_section_by_name
7740 (dynobj, RELOC_SECTION (globals, ".got")));
7741 BFD_ASSERT (srelgot != NULL);
7743 outrel.r_addend = addend + value;
7744 outrel.r_offset = (sgot->output_section->vma
7745 + sgot->output_offset
7747 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7748 loc = srelgot->contents;
7749 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7750 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7753 local_got_offsets[r_symndx] |= 1;
7756 value = sgot->output_offset + off;
7758 if (r_type != R_ARM_GOT32)
7759 value += sgot->output_section->vma;
7761 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7762 contents, rel->r_offset, value,
7765 case R_ARM_TLS_LDO32:
7766 value = value - dtpoff_base (info);
7768 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7769 contents, rel->r_offset, value,
7772 case R_ARM_TLS_LDM32:
7776 if (globals->sgot == NULL)
7779 off = globals->tls_ldm_got.offset;
7785 /* If we don't know the module number, create a relocation
7789 Elf_Internal_Rela outrel;
7792 if (globals->srelgot == NULL)
7795 outrel.r_addend = 0;
7796 outrel.r_offset = (globals->sgot->output_section->vma
7797 + globals->sgot->output_offset + off);
7798 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7800 if (globals->use_rel)
7801 bfd_put_32 (output_bfd, outrel.r_addend,
7802 globals->sgot->contents + off);
7804 loc = globals->srelgot->contents;
7805 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7806 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7809 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7811 globals->tls_ldm_got.offset |= 1;
7814 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7815 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7817 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7818 contents, rel->r_offset, value,
7822 case R_ARM_TLS_GD32:
7823 case R_ARM_TLS_IE32:
7829 if (globals->sgot == NULL)
7836 dyn = globals->root.dynamic_sections_created;
7837 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7839 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7841 *unresolved_reloc_p = FALSE;
7844 off = h->got.offset;
7845 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7849 if (local_got_offsets == NULL)
7851 off = local_got_offsets[r_symndx];
7852 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7855 if (tls_type == GOT_UNKNOWN)
7862 bfd_boolean need_relocs = FALSE;
7863 Elf_Internal_Rela outrel;
7864 bfd_byte *loc = NULL;
7867 /* The GOT entries have not been initialized yet. Do it
7868 now, and emit any relocations. If both an IE GOT and a
7869 GD GOT are necessary, we emit the GD first. */
7871 if ((info->shared || indx != 0)
7873 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7874 || h->root.type != bfd_link_hash_undefweak))
7877 if (globals->srelgot == NULL)
7879 loc = globals->srelgot->contents;
7880 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7883 if (tls_type & GOT_TLS_GD)
7887 outrel.r_addend = 0;
7888 outrel.r_offset = (globals->sgot->output_section->vma
7889 + globals->sgot->output_offset
7891 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
7893 if (globals->use_rel)
7894 bfd_put_32 (output_bfd, outrel.r_addend,
7895 globals->sgot->contents + cur_off);
7897 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7898 globals->srelgot->reloc_count++;
7899 loc += RELOC_SIZE (globals);
7902 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7903 globals->sgot->contents + cur_off + 4);
7906 outrel.r_addend = 0;
7907 outrel.r_info = ELF32_R_INFO (indx,
7908 R_ARM_TLS_DTPOFF32);
7909 outrel.r_offset += 4;
7911 if (globals->use_rel)
7912 bfd_put_32 (output_bfd, outrel.r_addend,
7913 globals->sgot->contents + cur_off + 4);
7916 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7917 globals->srelgot->reloc_count++;
7918 loc += RELOC_SIZE (globals);
7923 /* If we are not emitting relocations for a
7924 general dynamic reference, then we must be in a
7925 static link or an executable link with the
7926 symbol binding locally. Mark it as belonging
7927 to module 1, the executable. */
7928 bfd_put_32 (output_bfd, 1,
7929 globals->sgot->contents + cur_off);
7930 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7931 globals->sgot->contents + cur_off + 4);
7937 if (tls_type & GOT_TLS_IE)
7942 outrel.r_addend = value - dtpoff_base (info);
7944 outrel.r_addend = 0;
7945 outrel.r_offset = (globals->sgot->output_section->vma
7946 + globals->sgot->output_offset
7948 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
7950 if (globals->use_rel)
7951 bfd_put_32 (output_bfd, outrel.r_addend,
7952 globals->sgot->contents + cur_off);
7954 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7955 globals->srelgot->reloc_count++;
7956 loc += RELOC_SIZE (globals);
7959 bfd_put_32 (output_bfd, tpoff (info, value),
7960 globals->sgot->contents + cur_off);
7967 local_got_offsets[r_symndx] |= 1;
7970 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
7972 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7973 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7975 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7976 contents, rel->r_offset, value,
7980 case R_ARM_TLS_LE32:
7983 (*_bfd_error_handler)
7984 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
7985 input_bfd, input_section,
7986 (long) rel->r_offset, howto->name);
7990 value = tpoff (info, value);
7992 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7993 contents, rel->r_offset, value,
7997 if (globals->fix_v4bx)
7999 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8001 /* Ensure that we have a BX instruction. */
8002 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8004 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8006 /* Branch to veneer. */
8008 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8009 glue_addr -= input_section->output_section->vma
8010 + input_section->output_offset
8011 + rel->r_offset + 8;
8012 insn = (insn & 0xf0000000) | 0x0a000000
8013 | ((glue_addr >> 2) & 0x00ffffff);
8017 /* Preserve Rm (lowest four bits) and the condition code
8018 (highest four bits). Other bits encode MOV PC,Rm. */
8019 insn = (insn & 0xf000000f) | 0x01a0f000;
8022 bfd_put_32 (input_bfd, insn, hit_data);
8024 return bfd_reloc_ok;
8026 case R_ARM_MOVW_ABS_NC:
8027 case R_ARM_MOVT_ABS:
8028 case R_ARM_MOVW_PREL_NC:
8029 case R_ARM_MOVT_PREL:
8030 /* Until we properly support segment-base-relative addressing then
8031 we assume the segment base to be zero, as for the group relocations.
8032 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8033 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8034 case R_ARM_MOVW_BREL_NC:
8035 case R_ARM_MOVW_BREL:
8036 case R_ARM_MOVT_BREL:
8038 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8040 if (globals->use_rel)
8042 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8043 signed_addend = (addend ^ 0x8000) - 0x8000;
8046 value += signed_addend;
8048 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8049 value -= (input_section->output_section->vma
8050 + input_section->output_offset + rel->r_offset);
8052 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8053 return bfd_reloc_overflow;
8055 if (sym_flags == STT_ARM_TFUNC)
8058 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8059 || r_type == R_ARM_MOVT_BREL)
8063 insn |= value & 0xfff;
8064 insn |= (value & 0xf000) << 4;
8065 bfd_put_32 (input_bfd, insn, hit_data);
8067 return bfd_reloc_ok;
8069 case R_ARM_THM_MOVW_ABS_NC:
8070 case R_ARM_THM_MOVT_ABS:
8071 case R_ARM_THM_MOVW_PREL_NC:
8072 case R_ARM_THM_MOVT_PREL:
8073 /* Until we properly support segment-base-relative addressing then
8074 we assume the segment base to be zero, as for the above relocations.
8075 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8076 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8077 as R_ARM_THM_MOVT_ABS. */
8078 case R_ARM_THM_MOVW_BREL_NC:
8079 case R_ARM_THM_MOVW_BREL:
8080 case R_ARM_THM_MOVT_BREL:
8084 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8085 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8087 if (globals->use_rel)
8089 addend = ((insn >> 4) & 0xf000)
8090 | ((insn >> 15) & 0x0800)
8091 | ((insn >> 4) & 0x0700)
8093 signed_addend = (addend ^ 0x8000) - 0x8000;
8096 value += signed_addend;
8098 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8099 value -= (input_section->output_section->vma
8100 + input_section->output_offset + rel->r_offset);
8102 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8103 return bfd_reloc_overflow;
8105 if (sym_flags == STT_ARM_TFUNC)
8108 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8109 || r_type == R_ARM_THM_MOVT_BREL)
8113 insn |= (value & 0xf000) << 4;
8114 insn |= (value & 0x0800) << 15;
8115 insn |= (value & 0x0700) << 4;
8116 insn |= (value & 0x00ff);
8118 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8119 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8121 return bfd_reloc_ok;
8123 case R_ARM_ALU_PC_G0_NC:
8124 case R_ARM_ALU_PC_G1_NC:
8125 case R_ARM_ALU_PC_G0:
8126 case R_ARM_ALU_PC_G1:
8127 case R_ARM_ALU_PC_G2:
8128 case R_ARM_ALU_SB_G0_NC:
8129 case R_ARM_ALU_SB_G1_NC:
8130 case R_ARM_ALU_SB_G0:
8131 case R_ARM_ALU_SB_G1:
8132 case R_ARM_ALU_SB_G2:
8134 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8135 bfd_vma pc = input_section->output_section->vma
8136 + input_section->output_offset + rel->r_offset;
8137 /* sb should be the origin of the *segment* containing the symbol.
8138 It is not clear how to obtain this OS-dependent value, so we
8139 make an arbitrary choice of zero. */
8143 bfd_signed_vma signed_value;
8146 /* Determine which group of bits to select. */
8149 case R_ARM_ALU_PC_G0_NC:
8150 case R_ARM_ALU_PC_G0:
8151 case R_ARM_ALU_SB_G0_NC:
8152 case R_ARM_ALU_SB_G0:
8156 case R_ARM_ALU_PC_G1_NC:
8157 case R_ARM_ALU_PC_G1:
8158 case R_ARM_ALU_SB_G1_NC:
8159 case R_ARM_ALU_SB_G1:
8163 case R_ARM_ALU_PC_G2:
8164 case R_ARM_ALU_SB_G2:
8172 /* If REL, extract the addend from the insn. If RELA, it will
8173 have already been fetched for us. */
8174 if (globals->use_rel)
8177 bfd_vma constant = insn & 0xff;
8178 bfd_vma rotation = (insn & 0xf00) >> 8;
8181 signed_addend = constant;
8184 /* Compensate for the fact that in the instruction, the
8185 rotation is stored in multiples of 2 bits. */
8188 /* Rotate "constant" right by "rotation" bits. */
8189 signed_addend = (constant >> rotation) |
8190 (constant << (8 * sizeof (bfd_vma) - rotation));
8193 /* Determine if the instruction is an ADD or a SUB.
8194 (For REL, this determines the sign of the addend.) */
8195 negative = identify_add_or_sub (insn);
8198 (*_bfd_error_handler)
8199 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8200 input_bfd, input_section,
8201 (long) rel->r_offset, howto->name);
8202 return bfd_reloc_overflow;
8205 signed_addend *= negative;
8208 /* Compute the value (X) to go in the place. */
8209 if (r_type == R_ARM_ALU_PC_G0_NC
8210 || r_type == R_ARM_ALU_PC_G1_NC
8211 || r_type == R_ARM_ALU_PC_G0
8212 || r_type == R_ARM_ALU_PC_G1
8213 || r_type == R_ARM_ALU_PC_G2)
8215 signed_value = value - pc + signed_addend;
8217 /* Section base relative. */
8218 signed_value = value - sb + signed_addend;
8220 /* If the target symbol is a Thumb function, then set the
8221 Thumb bit in the address. */
8222 if (sym_flags == STT_ARM_TFUNC)
8225 /* Calculate the value of the relevant G_n, in encoded
8226 constant-with-rotation format. */
8227 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8230 /* Check for overflow if required. */
8231 if ((r_type == R_ARM_ALU_PC_G0
8232 || r_type == R_ARM_ALU_PC_G1
8233 || r_type == R_ARM_ALU_PC_G2
8234 || r_type == R_ARM_ALU_SB_G0
8235 || r_type == R_ARM_ALU_SB_G1
8236 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8238 (*_bfd_error_handler)
8239 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8240 input_bfd, input_section,
8241 (long) rel->r_offset, abs (signed_value), howto->name);
8242 return bfd_reloc_overflow;
8245 /* Mask out the value and the ADD/SUB part of the opcode; take care
8246 not to destroy the S bit. */
8249 /* Set the opcode according to whether the value to go in the
8250 place is negative. */
8251 if (signed_value < 0)
8256 /* Encode the offset. */
8259 bfd_put_32 (input_bfd, insn, hit_data);
8261 return bfd_reloc_ok;
8263 case R_ARM_LDR_PC_G0:
8264 case R_ARM_LDR_PC_G1:
8265 case R_ARM_LDR_PC_G2:
8266 case R_ARM_LDR_SB_G0:
8267 case R_ARM_LDR_SB_G1:
8268 case R_ARM_LDR_SB_G2:
8270 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8271 bfd_vma pc = input_section->output_section->vma
8272 + input_section->output_offset + rel->r_offset;
8273 bfd_vma sb = 0; /* See note above. */
8275 bfd_signed_vma signed_value;
8278 /* Determine which groups of bits to calculate. */
8281 case R_ARM_LDR_PC_G0:
8282 case R_ARM_LDR_SB_G0:
8286 case R_ARM_LDR_PC_G1:
8287 case R_ARM_LDR_SB_G1:
8291 case R_ARM_LDR_PC_G2:
8292 case R_ARM_LDR_SB_G2:
8300 /* If REL, extract the addend from the insn. If RELA, it will
8301 have already been fetched for us. */
8302 if (globals->use_rel)
8304 int negative = (insn & (1 << 23)) ? 1 : -1;
8305 signed_addend = negative * (insn & 0xfff);
8308 /* Compute the value (X) to go in the place. */
8309 if (r_type == R_ARM_LDR_PC_G0
8310 || r_type == R_ARM_LDR_PC_G1
8311 || r_type == R_ARM_LDR_PC_G2)
8313 signed_value = value - pc + signed_addend;
8315 /* Section base relative. */
8316 signed_value = value - sb + signed_addend;
8318 /* Calculate the value of the relevant G_{n-1} to obtain
8319 the residual at that stage. */
8320 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8322 /* Check for overflow. */
8323 if (residual >= 0x1000)
8325 (*_bfd_error_handler)
8326 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8327 input_bfd, input_section,
8328 (long) rel->r_offset, abs (signed_value), howto->name);
8329 return bfd_reloc_overflow;
8332 /* Mask out the value and U bit. */
8335 /* Set the U bit if the value to go in the place is non-negative. */
8336 if (signed_value >= 0)
8339 /* Encode the offset. */
8342 bfd_put_32 (input_bfd, insn, hit_data);
8344 return bfd_reloc_ok;
8346 case R_ARM_LDRS_PC_G0:
8347 case R_ARM_LDRS_PC_G1:
8348 case R_ARM_LDRS_PC_G2:
8349 case R_ARM_LDRS_SB_G0:
8350 case R_ARM_LDRS_SB_G1:
8351 case R_ARM_LDRS_SB_G2:
8353 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8354 bfd_vma pc = input_section->output_section->vma
8355 + input_section->output_offset + rel->r_offset;
8356 bfd_vma sb = 0; /* See note above. */
8358 bfd_signed_vma signed_value;
8361 /* Determine which groups of bits to calculate. */
8364 case R_ARM_LDRS_PC_G0:
8365 case R_ARM_LDRS_SB_G0:
8369 case R_ARM_LDRS_PC_G1:
8370 case R_ARM_LDRS_SB_G1:
8374 case R_ARM_LDRS_PC_G2:
8375 case R_ARM_LDRS_SB_G2:
8383 /* If REL, extract the addend from the insn. If RELA, it will
8384 have already been fetched for us. */
8385 if (globals->use_rel)
8387 int negative = (insn & (1 << 23)) ? 1 : -1;
8388 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8391 /* Compute the value (X) to go in the place. */
8392 if (r_type == R_ARM_LDRS_PC_G0
8393 || r_type == R_ARM_LDRS_PC_G1
8394 || r_type == R_ARM_LDRS_PC_G2)
8396 signed_value = value - pc + signed_addend;
8398 /* Section base relative. */
8399 signed_value = value - sb + signed_addend;
8401 /* Calculate the value of the relevant G_{n-1} to obtain
8402 the residual at that stage. */
8403 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8405 /* Check for overflow. */
8406 if (residual >= 0x100)
8408 (*_bfd_error_handler)
8409 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8410 input_bfd, input_section,
8411 (long) rel->r_offset, abs (signed_value), howto->name);
8412 return bfd_reloc_overflow;
8415 /* Mask out the value and U bit. */
8418 /* Set the U bit if the value to go in the place is non-negative. */
8419 if (signed_value >= 0)
8422 /* Encode the offset. */
8423 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8425 bfd_put_32 (input_bfd, insn, hit_data);
8427 return bfd_reloc_ok;
8429 case R_ARM_LDC_PC_G0:
8430 case R_ARM_LDC_PC_G1:
8431 case R_ARM_LDC_PC_G2:
8432 case R_ARM_LDC_SB_G0:
8433 case R_ARM_LDC_SB_G1:
8434 case R_ARM_LDC_SB_G2:
8436 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8437 bfd_vma pc = input_section->output_section->vma
8438 + input_section->output_offset + rel->r_offset;
8439 bfd_vma sb = 0; /* See note above. */
8441 bfd_signed_vma signed_value;
8444 /* Determine which groups of bits to calculate. */
8447 case R_ARM_LDC_PC_G0:
8448 case R_ARM_LDC_SB_G0:
8452 case R_ARM_LDC_PC_G1:
8453 case R_ARM_LDC_SB_G1:
8457 case R_ARM_LDC_PC_G2:
8458 case R_ARM_LDC_SB_G2:
8466 /* If REL, extract the addend from the insn. If RELA, it will
8467 have already been fetched for us. */
8468 if (globals->use_rel)
8470 int negative = (insn & (1 << 23)) ? 1 : -1;
8471 signed_addend = negative * ((insn & 0xff) << 2);
8474 /* Compute the value (X) to go in the place. */
8475 if (r_type == R_ARM_LDC_PC_G0
8476 || r_type == R_ARM_LDC_PC_G1
8477 || r_type == R_ARM_LDC_PC_G2)
8479 signed_value = value - pc + signed_addend;
8481 /* Section base relative. */
8482 signed_value = value - sb + signed_addend;
8484 /* Calculate the value of the relevant G_{n-1} to obtain
8485 the residual at that stage. */
8486 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8488 /* Check for overflow. (The absolute value to go in the place must be
8489 divisible by four and, after having been divided by four, must
8490 fit in eight bits.) */
8491 if ((residual & 0x3) != 0 || residual >= 0x400)
8493 (*_bfd_error_handler)
8494 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8495 input_bfd, input_section,
8496 (long) rel->r_offset, abs (signed_value), howto->name);
8497 return bfd_reloc_overflow;
8500 /* Mask out the value and U bit. */
8503 /* Set the U bit if the value to go in the place is non-negative. */
8504 if (signed_value >= 0)
8507 /* Encode the offset. */
8508 insn |= residual >> 2;
8510 bfd_put_32 (input_bfd, insn, hit_data);
8512 return bfd_reloc_ok;
8515 return bfd_reloc_notsupported;
8519 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8521 arm_add_to_rel (bfd * abfd,
8523 reloc_howto_type * howto,
8524 bfd_signed_vma increment)
8526 bfd_signed_vma addend;
8528 if (howto->type == R_ARM_THM_CALL
8529 || howto->type == R_ARM_THM_JUMP24)
8531 int upper_insn, lower_insn;
8534 upper_insn = bfd_get_16 (abfd, address);
8535 lower_insn = bfd_get_16 (abfd, address + 2);
8536 upper = upper_insn & 0x7ff;
8537 lower = lower_insn & 0x7ff;
8539 addend = (upper << 12) | (lower << 1);
8540 addend += increment;
8543 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8544 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8546 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8547 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8553 contents = bfd_get_32 (abfd, address);
8555 /* Get the (signed) value from the instruction. */
8556 addend = contents & howto->src_mask;
8557 if (addend & ((howto->src_mask + 1) >> 1))
8559 bfd_signed_vma mask;
8562 mask &= ~ howto->src_mask;
8566 /* Add in the increment, (which is a byte value). */
8567 switch (howto->type)
8570 addend += increment;
8577 addend <<= howto->size;
8578 addend += increment;
8580 /* Should we check for overflow here ? */
8582 /* Drop any undesired bits. */
8583 addend >>= howto->rightshift;
8587 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8589 bfd_put_32 (abfd, contents, address);
8593 #define IS_ARM_TLS_RELOC(R_TYPE) \
8594 ((R_TYPE) == R_ARM_TLS_GD32 \
8595 || (R_TYPE) == R_ARM_TLS_LDO32 \
8596 || (R_TYPE) == R_ARM_TLS_LDM32 \
8597 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8598 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8599 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8600 || (R_TYPE) == R_ARM_TLS_LE32 \
8601 || (R_TYPE) == R_ARM_TLS_IE32)
8603 /* Relocate an ARM ELF section. */
8606 elf32_arm_relocate_section (bfd * output_bfd,
8607 struct bfd_link_info * info,
8609 asection * input_section,
8610 bfd_byte * contents,
8611 Elf_Internal_Rela * relocs,
8612 Elf_Internal_Sym * local_syms,
8613 asection ** local_sections)
8615 Elf_Internal_Shdr *symtab_hdr;
8616 struct elf_link_hash_entry **sym_hashes;
8617 Elf_Internal_Rela *rel;
8618 Elf_Internal_Rela *relend;
8620 struct elf32_arm_link_hash_table * globals;
8622 globals = elf32_arm_hash_table (info);
8624 symtab_hdr = & elf_symtab_hdr (input_bfd);
8625 sym_hashes = elf_sym_hashes (input_bfd);
8628 relend = relocs + input_section->reloc_count;
8629 for (; rel < relend; rel++)
8632 reloc_howto_type * howto;
8633 unsigned long r_symndx;
8634 Elf_Internal_Sym * sym;
8636 struct elf_link_hash_entry * h;
8638 bfd_reloc_status_type r;
8641 bfd_boolean unresolved_reloc = FALSE;
8642 char *error_message = NULL;
8644 r_symndx = ELF32_R_SYM (rel->r_info);
8645 r_type = ELF32_R_TYPE (rel->r_info);
8646 r_type = arm_real_reloc_type (globals, r_type);
8648 if ( r_type == R_ARM_GNU_VTENTRY
8649 || r_type == R_ARM_GNU_VTINHERIT)
8652 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8653 howto = bfd_reloc.howto;
8659 if (r_symndx < symtab_hdr->sh_info)
8661 sym = local_syms + r_symndx;
8662 sym_type = ELF32_ST_TYPE (sym->st_info);
8663 sec = local_sections[r_symndx];
8664 if (globals->use_rel)
8666 relocation = (sec->output_section->vma
8667 + sec->output_offset
8669 if (!info->relocatable
8670 && (sec->flags & SEC_MERGE)
8671 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8674 bfd_vma addend, value;
8678 case R_ARM_MOVW_ABS_NC:
8679 case R_ARM_MOVT_ABS:
8680 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8681 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8682 addend = (addend ^ 0x8000) - 0x8000;
8685 case R_ARM_THM_MOVW_ABS_NC:
8686 case R_ARM_THM_MOVT_ABS:
8687 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8689 value |= bfd_get_16 (input_bfd,
8690 contents + rel->r_offset + 2);
8691 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8692 | ((value & 0x04000000) >> 15);
8693 addend = (addend ^ 0x8000) - 0x8000;
8697 if (howto->rightshift
8698 || (howto->src_mask & (howto->src_mask + 1)))
8700 (*_bfd_error_handler)
8701 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8702 input_bfd, input_section,
8703 (long) rel->r_offset, howto->name);
8707 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8709 /* Get the (signed) value from the instruction. */
8710 addend = value & howto->src_mask;
8711 if (addend & ((howto->src_mask + 1) >> 1))
8713 bfd_signed_vma mask;
8716 mask &= ~ howto->src_mask;
8724 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8726 addend += msec->output_section->vma + msec->output_offset;
8728 /* Cases here must match those in the preceeding
8729 switch statement. */
8732 case R_ARM_MOVW_ABS_NC:
8733 case R_ARM_MOVT_ABS:
8734 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8736 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8739 case R_ARM_THM_MOVW_ABS_NC:
8740 case R_ARM_THM_MOVT_ABS:
8741 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8742 | (addend & 0xff) | ((addend & 0x0800) << 15);
8743 bfd_put_16 (input_bfd, value >> 16,
8744 contents + rel->r_offset);
8745 bfd_put_16 (input_bfd, value,
8746 contents + rel->r_offset + 2);
8750 value = (value & ~ howto->dst_mask)
8751 | (addend & howto->dst_mask);
8752 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8758 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8764 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8765 r_symndx, symtab_hdr, sym_hashes,
8767 unresolved_reloc, warned);
8772 if (sec != NULL && elf_discarded_section (sec))
8774 /* For relocs against symbols from removed linkonce sections,
8775 or sections discarded by a linker script, we just want the
8776 section contents zeroed. Avoid any special processing. */
8777 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8783 if (info->relocatable)
8785 /* This is a relocatable link. We don't have to change
8786 anything, unless the reloc is against a section symbol,
8787 in which case we have to adjust according to where the
8788 section symbol winds up in the output section. */
8789 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8791 if (globals->use_rel)
8792 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8793 howto, (bfd_signed_vma) sec->output_offset);
8795 rel->r_addend += sec->output_offset;
8801 name = h->root.root.string;
8804 name = (bfd_elf_string_from_elf_section
8805 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8806 if (name == NULL || *name == '\0')
8807 name = bfd_section_name (input_bfd, sec);
8811 && r_type != R_ARM_NONE
8813 || h->root.type == bfd_link_hash_defined
8814 || h->root.type == bfd_link_hash_defweak)
8815 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8817 (*_bfd_error_handler)
8818 ((sym_type == STT_TLS
8819 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8820 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8823 (long) rel->r_offset,
8828 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8829 input_section, contents, rel,
8830 relocation, info, sec, name,
8831 (h ? ELF_ST_TYPE (h->type) :
8832 ELF_ST_TYPE (sym->st_info)), h,
8833 &unresolved_reloc, &error_message);
8835 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8836 because such sections are not SEC_ALLOC and thus ld.so will
8837 not process them. */
8838 if (unresolved_reloc
8839 && !((input_section->flags & SEC_DEBUGGING) != 0
8842 (*_bfd_error_handler)
8843 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8846 (long) rel->r_offset,
8848 h->root.root.string);
8852 if (r != bfd_reloc_ok)
8856 case bfd_reloc_overflow:
8857 /* If the overflowing reloc was to an undefined symbol,
8858 we have already printed one error message and there
8859 is no point complaining again. */
8861 h->root.type != bfd_link_hash_undefined)
8862 && (!((*info->callbacks->reloc_overflow)
8863 (info, (h ? &h->root : NULL), name, howto->name,
8864 (bfd_vma) 0, input_bfd, input_section,
8869 case bfd_reloc_undefined:
8870 if (!((*info->callbacks->undefined_symbol)
8871 (info, name, input_bfd, input_section,
8872 rel->r_offset, TRUE)))
8876 case bfd_reloc_outofrange:
8877 error_message = _("out of range");
8880 case bfd_reloc_notsupported:
8881 error_message = _("unsupported relocation");
8884 case bfd_reloc_dangerous:
8885 /* error_message should already be set. */
8889 error_message = _("unknown error");
8893 BFD_ASSERT (error_message != NULL);
8894 if (!((*info->callbacks->reloc_dangerous)
8895 (info, error_message, input_bfd, input_section,
8906 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
8907 adds the edit to the start of the list. (The list must be built in order of
8908 ascending INDEX: the function's callers are primarily responsible for
8909 maintaining that condition). */
8912 add_unwind_table_edit (arm_unwind_table_edit **head,
8913 arm_unwind_table_edit **tail,
8914 arm_unwind_edit_type type,
8915 asection *linked_section,
8918 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
8920 new_edit->type = type;
8921 new_edit->linked_section = linked_section;
8922 new_edit->index = index;
8926 new_edit->next = NULL;
8929 (*tail)->next = new_edit;
8938 new_edit->next = *head;
8947 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
8949 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
8951 adjust_exidx_size(asection *exidx_sec, int adjust)
8955 if (!exidx_sec->rawsize)
8956 exidx_sec->rawsize = exidx_sec->size;
8958 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
8959 out_sec = exidx_sec->output_section;
8960 /* Adjust size of output section. */
8961 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
8964 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
8966 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
8968 struct _arm_elf_section_data *exidx_arm_data;
8970 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
8971 add_unwind_table_edit (
8972 &exidx_arm_data->u.exidx.unwind_edit_list,
8973 &exidx_arm_data->u.exidx.unwind_edit_tail,
8974 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
8976 adjust_exidx_size(exidx_sec, 8);
8979 /* Scan .ARM.exidx tables, and create a list describing edits which should be
8980 made to those tables, such that:
8982 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
8983 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
8984 codes which have been inlined into the index).
8986 The edits are applied when the tables are written
8987 (in elf32_arm_write_section).
8991 elf32_arm_fix_exidx_coverage (asection **text_section_order,
8992 unsigned int num_text_sections,
8993 struct bfd_link_info *info)
8996 unsigned int last_second_word = 0, i;
8997 asection *last_exidx_sec = NULL;
8998 asection *last_text_sec = NULL;
8999 int last_unwind_type = -1;
9001 /* Walk over all EXIDX sections, and create backlinks from the corrsponding
9003 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9007 for (sec = inp->sections; sec != NULL; sec = sec->next)
9009 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9010 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9012 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9015 if (elf_sec->linked_to)
9017 Elf_Internal_Shdr *linked_hdr
9018 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9019 struct _arm_elf_section_data *linked_sec_arm_data
9020 = get_arm_elf_section_data (linked_hdr->bfd_section);
9022 if (linked_sec_arm_data == NULL)
9025 /* Link this .ARM.exidx section back from the text section it
9027 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9032 /* Walk all text sections in order of increasing VMA. Eilminate duplicate
9033 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9034 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
9037 for (i = 0; i < num_text_sections; i++)
9039 asection *sec = text_section_order[i];
9040 asection *exidx_sec;
9041 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9042 struct _arm_elf_section_data *exidx_arm_data;
9043 bfd_byte *contents = NULL;
9044 int deleted_exidx_bytes = 0;
9046 arm_unwind_table_edit *unwind_edit_head = NULL;
9047 arm_unwind_table_edit *unwind_edit_tail = NULL;
9048 Elf_Internal_Shdr *hdr;
9051 if (arm_data == NULL)
9054 exidx_sec = arm_data->u.text.arm_exidx_sec;
9055 if (exidx_sec == NULL)
9057 /* Section has no unwind data. */
9058 if (last_unwind_type == 0 || !last_exidx_sec)
9061 /* Ignore zero sized sections. */
9065 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9066 last_unwind_type = 0;
9070 /* Skip /DISCARD/ sections. */
9071 if (bfd_is_abs_section (exidx_sec->output_section))
9074 hdr = &elf_section_data (exidx_sec)->this_hdr;
9075 if (hdr->sh_type != SHT_ARM_EXIDX)
9078 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9079 if (exidx_arm_data == NULL)
9082 ibfd = exidx_sec->owner;
9084 if (hdr->contents != NULL)
9085 contents = hdr->contents;
9086 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9090 for (j = 0; j < hdr->sh_size; j += 8)
9092 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9096 /* An EXIDX_CANTUNWIND entry. */
9097 if (second_word == 1)
9099 if (last_unwind_type == 0)
9103 /* Inlined unwinding data. Merge if equal to previous. */
9104 else if ((second_word & 0x80000000) != 0)
9106 if (last_second_word == second_word && last_unwind_type == 1)
9109 last_second_word = second_word;
9111 /* Normal table entry. In theory we could merge these too,
9112 but duplicate entries are likely to be much less common. */
9118 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9119 DELETE_EXIDX_ENTRY, NULL, j / 8);
9121 deleted_exidx_bytes += 8;
9124 last_unwind_type = unwind_type;
9127 /* Free contents if we allocated it ourselves. */
9128 if (contents != hdr->contents)
9131 /* Record edits to be applied later (in elf32_arm_write_section). */
9132 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9133 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9135 if (deleted_exidx_bytes > 0)
9136 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9138 last_exidx_sec = exidx_sec;
9139 last_text_sec = sec;
9142 /* Add terminating CANTUNWIND entry. */
9143 if (last_exidx_sec && last_unwind_type != 0)
9144 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9150 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9151 bfd *ibfd, const char *name)
9153 asection *sec, *osec;
9155 sec = bfd_get_section_by_name (ibfd, name);
9156 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9159 osec = sec->output_section;
9160 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9163 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9164 sec->output_offset, sec->size))
9171 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9173 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9175 /* Invoke the regular ELF backend linker to do all the work. */
9176 if (!bfd_elf_final_link (abfd, info))
9179 /* Write out any glue sections now that we have created all the
9181 if (globals->bfd_of_glue_owner != NULL)
9183 if (! elf32_arm_output_glue_section (info, abfd,
9184 globals->bfd_of_glue_owner,
9185 ARM2THUMB_GLUE_SECTION_NAME))
9188 if (! elf32_arm_output_glue_section (info, abfd,
9189 globals->bfd_of_glue_owner,
9190 THUMB2ARM_GLUE_SECTION_NAME))
9193 if (! elf32_arm_output_glue_section (info, abfd,
9194 globals->bfd_of_glue_owner,
9195 VFP11_ERRATUM_VENEER_SECTION_NAME))
9198 if (! elf32_arm_output_glue_section (info, abfd,
9199 globals->bfd_of_glue_owner,
9200 ARM_BX_GLUE_SECTION_NAME))
9207 /* Set the right machine number. */
9210 elf32_arm_object_p (bfd *abfd)
9214 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9216 if (mach != bfd_mach_arm_unknown)
9217 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9219 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9220 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9223 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9228 /* Function to keep ARM specific flags in the ELF header. */
9231 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9233 if (elf_flags_init (abfd)
9234 && elf_elfheader (abfd)->e_flags != flags)
9236 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9238 if (flags & EF_ARM_INTERWORK)
9239 (*_bfd_error_handler)
9240 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9244 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9250 elf_elfheader (abfd)->e_flags = flags;
9251 elf_flags_init (abfd) = TRUE;
9257 /* Copy backend specific data from one object module to another. */
9260 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9265 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9268 in_flags = elf_elfheader (ibfd)->e_flags;
9269 out_flags = elf_elfheader (obfd)->e_flags;
9271 if (elf_flags_init (obfd)
9272 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9273 && in_flags != out_flags)
9275 /* Cannot mix APCS26 and APCS32 code. */
9276 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9279 /* Cannot mix float APCS and non-float APCS code. */
9280 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9283 /* If the src and dest have different interworking flags
9284 then turn off the interworking bit. */
9285 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9287 if (out_flags & EF_ARM_INTERWORK)
9289 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9292 in_flags &= ~EF_ARM_INTERWORK;
9295 /* Likewise for PIC, though don't warn for this case. */
9296 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9297 in_flags &= ~EF_ARM_PIC;
9300 elf_elfheader (obfd)->e_flags = in_flags;
9301 elf_flags_init (obfd) = TRUE;
9303 /* Also copy the EI_OSABI field. */
9304 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9305 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9307 /* Copy object attributes. */
9308 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9313 /* Values for Tag_ABI_PCS_R9_use. */
9322 /* Values for Tag_ABI_PCS_RW_data. */
9325 AEABI_PCS_RW_data_absolute,
9326 AEABI_PCS_RW_data_PCrel,
9327 AEABI_PCS_RW_data_SBrel,
9328 AEABI_PCS_RW_data_unused
9331 /* Values for Tag_ABI_enum_size. */
9337 AEABI_enum_forced_wide
9340 /* Determine whether an object attribute tag takes an integer, a
9344 elf32_arm_obj_attrs_arg_type (int tag)
9346 if (tag == Tag_compatibility)
9347 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9348 else if (tag == Tag_nodefaults)
9349 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9350 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9351 return ATTR_TYPE_FLAG_STR_VAL;
9353 return ATTR_TYPE_FLAG_INT_VAL;
9355 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9358 /* The ABI defines that Tag_conformance should be emitted first, and that
9359 Tag_nodefaults should be second (if either is defined). This sets those
9360 two positions, and bumps up the position of all the remaining tags to
9363 elf32_arm_obj_attrs_order (int num)
9366 return Tag_conformance;
9368 return Tag_nodefaults;
9369 if ((num - 2) < Tag_nodefaults)
9371 if ((num - 1) < Tag_conformance)
9376 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9377 Returns -1 if no architecture could be read. */
9380 get_secondary_compatible_arch (bfd *abfd)
9382 obj_attribute *attr =
9383 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9385 /* Note: the tag and its argument below are uleb128 values, though
9386 currently-defined values fit in one byte for each. */
9388 && attr->s[0] == Tag_CPU_arch
9389 && (attr->s[1] & 128) != 128
9393 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9397 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9398 The tag is removed if ARCH is -1. */
9401 set_secondary_compatible_arch (bfd *abfd, int arch)
9403 obj_attribute *attr =
9404 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9412 /* Note: the tag and its argument below are uleb128 values, though
9413 currently-defined values fit in one byte for each. */
9415 attr->s = bfd_alloc (abfd, 3);
9416 attr->s[0] = Tag_CPU_arch;
9421 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9425 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9426 int newtag, int secondary_compat)
9428 #define T(X) TAG_CPU_ARCH_##X
9429 int tagl, tagh, result;
9432 T(V6T2), /* PRE_V4. */
9436 T(V6T2), /* V5TE. */
9437 T(V6T2), /* V5TEJ. */
9444 T(V6K), /* PRE_V4. */
9449 T(V6K), /* V5TEJ. */
9451 T(V6KZ), /* V6KZ. */
9457 T(V7), /* PRE_V4. */
9476 T(V6K), /* V5TEJ. */
9478 T(V6KZ), /* V6KZ. */
9491 T(V6K), /* V5TEJ. */
9493 T(V6KZ), /* V6KZ. */
9497 T(V6S_M), /* V6_M. */
9498 T(V6S_M) /* V6S_M. */
9500 const int v4t_plus_v6_m[] =
9506 T(V5TE), /* V5TE. */
9507 T(V5TEJ), /* V5TEJ. */
9509 T(V6KZ), /* V6KZ. */
9510 T(V6T2), /* V6T2. */
9513 T(V6_M), /* V6_M. */
9514 T(V6S_M), /* V6S_M. */
9515 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9524 /* Pseudo-architecture. */
9528 /* Check we've not got a higher architecture than we know about. */
9530 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9532 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9536 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9538 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9539 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9540 oldtag = T(V4T_PLUS_V6_M);
9542 /* And override the new tag if we have a Tag_also_compatible_with on the
9545 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9546 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9547 newtag = T(V4T_PLUS_V6_M);
9549 tagl = (oldtag < newtag) ? oldtag : newtag;
9550 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9552 /* Architectures before V6KZ add features monotonically. */
9553 if (tagh <= TAG_CPU_ARCH_V6KZ)
9556 result = comb[tagh - T(V6T2)][tagl];
9558 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9559 as the canonical version. */
9560 if (result == T(V4T_PLUS_V6_M))
9563 *secondary_compat_out = T(V6_M);
9566 *secondary_compat_out = -1;
9570 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9571 ibfd, oldtag, newtag);
9579 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9580 are conflicting attributes. */
9583 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9585 obj_attribute *in_attr;
9586 obj_attribute *out_attr;
9587 obj_attribute_list *in_list;
9588 obj_attribute_list *out_list;
9589 obj_attribute_list **out_listp;
9590 /* Some tags have 0 = don't care, 1 = strong requirement,
9591 2 = weak requirement. */
9592 static const int order_021[3] = {0, 2, 1};
9593 /* For use with Tag_VFP_arch. */
9594 static const int order_01243[5] = {0, 1, 2, 4, 3};
9596 bfd_boolean result = TRUE;
9598 /* Skip the linker stubs file. This preserves previous behavior
9599 of accepting unknown attributes in the first input file - but
9601 if (ibfd->flags & BFD_LINKER_CREATED)
9604 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9606 /* This is the first object. Copy the attributes. */
9607 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9609 /* Use the Tag_null value to indicate the attributes have been
9611 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9616 in_attr = elf_known_obj_attributes_proc (ibfd);
9617 out_attr = elf_known_obj_attributes_proc (obfd);
9618 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9619 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9621 /* Ignore mismatches if the object doesn't use floating point. */
9622 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9623 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9624 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9627 (_("error: %B uses VFP register arguments, %B does not"),
9633 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9635 /* Merge this attribute with existing attributes. */
9638 case Tag_CPU_raw_name:
9640 /* These are merged after Tag_CPU_arch. */
9643 case Tag_ABI_optimization_goals:
9644 case Tag_ABI_FP_optimization_goals:
9645 /* Use the first value seen. */
9650 int secondary_compat = -1, secondary_compat_out = -1;
9651 unsigned int saved_out_attr = out_attr[i].i;
9652 static const char *name_table[] = {
9653 /* These aren't real CPU names, but we can't guess
9654 that from the architecture version alone. */
9670 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9671 secondary_compat = get_secondary_compatible_arch (ibfd);
9672 secondary_compat_out = get_secondary_compatible_arch (obfd);
9673 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9674 &secondary_compat_out,
9677 set_secondary_compatible_arch (obfd, secondary_compat_out);
9679 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9680 if (out_attr[i].i == saved_out_attr)
9681 ; /* Leave the names alone. */
9682 else if (out_attr[i].i == in_attr[i].i)
9684 /* The output architecture has been changed to match the
9685 input architecture. Use the input names. */
9686 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9687 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9689 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9690 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9695 out_attr[Tag_CPU_name].s = NULL;
9696 out_attr[Tag_CPU_raw_name].s = NULL;
9699 /* If we still don't have a value for Tag_CPU_name,
9700 make one up now. Tag_CPU_raw_name remains blank. */
9701 if (out_attr[Tag_CPU_name].s == NULL
9702 && out_attr[i].i < ARRAY_SIZE (name_table))
9703 out_attr[Tag_CPU_name].s =
9704 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9708 case Tag_ARM_ISA_use:
9709 case Tag_THUMB_ISA_use:
9711 case Tag_Advanced_SIMD_arch:
9712 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9713 case Tag_ABI_FP_rounding:
9714 case Tag_ABI_FP_exceptions:
9715 case Tag_ABI_FP_user_exceptions:
9716 case Tag_ABI_FP_number_model:
9717 case Tag_VFP_HP_extension:
9718 case Tag_CPU_unaligned_access:
9720 case Tag_Virtualization_use:
9721 case Tag_MPextension_use:
9722 /* Use the largest value specified. */
9723 if (in_attr[i].i > out_attr[i].i)
9724 out_attr[i].i = in_attr[i].i;
9727 case Tag_ABI_align8_preserved:
9728 case Tag_ABI_PCS_RO_data:
9729 /* Use the smallest value specified. */
9730 if (in_attr[i].i < out_attr[i].i)
9731 out_attr[i].i = in_attr[i].i;
9734 case Tag_ABI_align8_needed:
9735 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9736 && (in_attr[Tag_ABI_align8_preserved].i == 0
9737 || out_attr[Tag_ABI_align8_preserved].i == 0))
9739 /* This error message should be enabled once all non-conformant
9740 binaries in the toolchain have had the attributes set
9743 (_("error: %B: 8-byte data alignment conflicts with %B"),
9748 case Tag_ABI_FP_denormal:
9749 case Tag_ABI_PCS_GOT_use:
9750 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9751 value if greater than 2 (for future-proofing). */
9752 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9753 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9754 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9755 out_attr[i].i = in_attr[i].i;
9759 case Tag_CPU_arch_profile:
9760 if (out_attr[i].i != in_attr[i].i)
9762 /* 0 will merge with anything.
9763 'A' and 'S' merge to 'A'.
9764 'R' and 'S' merge to 'R'.
9765 'M' and 'A|R|S' is an error. */
9766 if (out_attr[i].i == 0
9767 || (out_attr[i].i == 'S'
9768 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9769 out_attr[i].i = in_attr[i].i;
9770 else if (in_attr[i].i == 0
9771 || (in_attr[i].i == 'S'
9772 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9777 (_("error: %B: Conflicting architecture profiles %c/%c"),
9779 in_attr[i].i ? in_attr[i].i : '0',
9780 out_attr[i].i ? out_attr[i].i : '0');
9786 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9787 largest value if greater than 4 (for future-proofing). */
9788 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9789 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9790 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9791 out_attr[i].i = in_attr[i].i;
9793 case Tag_PCS_config:
9794 if (out_attr[i].i == 0)
9795 out_attr[i].i = in_attr[i].i;
9796 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9798 /* It's sometimes ok to mix different configs, so this is only
9801 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9804 case Tag_ABI_PCS_R9_use:
9805 if (in_attr[i].i != out_attr[i].i
9806 && out_attr[i].i != AEABI_R9_unused
9807 && in_attr[i].i != AEABI_R9_unused)
9810 (_("error: %B: Conflicting use of R9"), ibfd);
9813 if (out_attr[i].i == AEABI_R9_unused)
9814 out_attr[i].i = in_attr[i].i;
9816 case Tag_ABI_PCS_RW_data:
9817 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9818 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9819 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9822 (_("error: %B: SB relative addressing conflicts with use of R9"),
9826 /* Use the smallest value specified. */
9827 if (in_attr[i].i < out_attr[i].i)
9828 out_attr[i].i = in_attr[i].i;
9830 case Tag_ABI_PCS_wchar_t:
9831 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9832 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9835 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9836 ibfd, in_attr[i].i, out_attr[i].i);
9838 else if (in_attr[i].i && !out_attr[i].i)
9839 out_attr[i].i = in_attr[i].i;
9841 case Tag_ABI_enum_size:
9842 if (in_attr[i].i != AEABI_enum_unused)
9844 if (out_attr[i].i == AEABI_enum_unused
9845 || out_attr[i].i == AEABI_enum_forced_wide)
9847 /* The existing object is compatible with anything.
9848 Use whatever requirements the new object has. */
9849 out_attr[i].i = in_attr[i].i;
9851 else if (in_attr[i].i != AEABI_enum_forced_wide
9852 && out_attr[i].i != in_attr[i].i
9853 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9855 static const char *aeabi_enum_names[] =
9856 { "", "variable-size", "32-bit", "" };
9857 const char *in_name =
9858 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9859 ? aeabi_enum_names[in_attr[i].i]
9861 const char *out_name =
9862 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9863 ? aeabi_enum_names[out_attr[i].i]
9866 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9867 ibfd, in_name, out_name);
9871 case Tag_ABI_VFP_args:
9874 case Tag_ABI_WMMX_args:
9875 if (in_attr[i].i != out_attr[i].i)
9878 (_("error: %B uses iWMMXt register arguments, %B does not"),
9883 case Tag_compatibility:
9884 /* Merged in target-independent code. */
9886 case Tag_ABI_HardFP_use:
9887 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
9888 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
9889 || (in_attr[i].i == 2 && out_attr[i].i == 1))
9891 else if (in_attr[i].i > out_attr[i].i)
9892 out_attr[i].i = in_attr[i].i;
9894 case Tag_ABI_FP_16bit_format:
9895 if (in_attr[i].i != 0 && out_attr[i].i != 0)
9897 if (in_attr[i].i != out_attr[i].i)
9900 (_("error: fp16 format mismatch between %B and %B"),
9905 if (in_attr[i].i != 0)
9906 out_attr[i].i = in_attr[i].i;
9909 case Tag_nodefaults:
9910 /* This tag is set if it exists, but the value is unused (and is
9911 typically zero). We don't actually need to do anything here -
9912 the merge happens automatically when the type flags are merged
9915 case Tag_also_compatible_with:
9916 /* Already done in Tag_CPU_arch. */
9918 case Tag_conformance:
9919 /* Keep the attribute if it matches. Throw it away otherwise.
9920 No attribute means no claim to conform. */
9921 if (!in_attr[i].s || !out_attr[i].s
9922 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
9923 out_attr[i].s = NULL;
9928 bfd *err_bfd = NULL;
9930 /* The "known_obj_attributes" table does contain some undefined
9931 attributes. Ensure that there are unused. */
9932 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
9934 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
9937 if (err_bfd != NULL)
9939 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9943 (_("%B: Unknown mandatory EABI object attribute %d"),
9945 bfd_set_error (bfd_error_bad_value);
9951 (_("Warning: %B: Unknown EABI object attribute %d"),
9956 /* Only pass on attributes that match in both inputs. */
9957 if (in_attr[i].i != out_attr[i].i
9958 || in_attr[i].s != out_attr[i].s
9959 || (in_attr[i].s != NULL && out_attr[i].s != NULL
9960 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
9963 out_attr[i].s = NULL;
9968 /* If out_attr was copied from in_attr then it won't have a type yet. */
9969 if (in_attr[i].type && !out_attr[i].type)
9970 out_attr[i].type = in_attr[i].type;
9973 /* Merge Tag_compatibility attributes and any common GNU ones. */
9974 _bfd_elf_merge_object_attributes (ibfd, obfd);
9976 /* Check for any attributes not known on ARM. */
9977 in_list = elf_other_obj_attributes_proc (ibfd);
9978 out_listp = &elf_other_obj_attributes_proc (obfd);
9979 out_list = *out_listp;
9981 for (; in_list || out_list; )
9983 bfd *err_bfd = NULL;
9986 /* The tags for each list are in numerical order. */
9987 /* If the tags are equal, then merge. */
9988 if (out_list && (!in_list || in_list->tag > out_list->tag))
9990 /* This attribute only exists in obfd. We can't merge, and we don't
9991 know what the tag means, so delete it. */
9993 err_tag = out_list->tag;
9994 *out_listp = out_list->next;
9995 out_list = *out_listp;
9997 else if (in_list && (!out_list || in_list->tag < out_list->tag))
9999 /* This attribute only exists in ibfd. We can't merge, and we don't
10000 know what the tag means, so ignore it. */
10002 err_tag = in_list->tag;
10003 in_list = in_list->next;
10005 else /* The tags are equal. */
10007 /* As present, all attributes in the list are unknown, and
10008 therefore can't be merged meaningfully. */
10010 err_tag = out_list->tag;
10012 /* Only pass on attributes that match in both inputs. */
10013 if (in_list->attr.i != out_list->attr.i
10014 || in_list->attr.s != out_list->attr.s
10015 || (in_list->attr.s && out_list->attr.s
10016 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10018 /* No match. Delete the attribute. */
10019 *out_listp = out_list->next;
10020 out_list = *out_listp;
10024 /* Matched. Keep the attribute and move to the next. */
10025 out_list = out_list->next;
10026 in_list = in_list->next;
10032 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10033 if ((err_tag & 127) < 64)
10036 (_("%B: Unknown mandatory EABI object attribute %d"),
10038 bfd_set_error (bfd_error_bad_value);
10044 (_("Warning: %B: Unknown EABI object attribute %d"),
10053 /* Return TRUE if the two EABI versions are incompatible. */
10056 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10058 /* v4 and v5 are the same spec before and after it was released,
10059 so allow mixing them. */
10060 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10061 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10064 return (iver == over);
10067 /* Merge backend specific data from an object file to the output
10068 object file when linking. */
10071 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
10073 flagword out_flags;
10075 bfd_boolean flags_compatible = TRUE;
10078 /* Check if we have the same endianess. */
10079 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
10082 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10085 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
10088 /* The input BFD must have had its flags initialised. */
10089 /* The following seems bogus to me -- The flags are initialized in
10090 the assembler but I don't think an elf_flags_init field is
10091 written into the object. */
10092 /* BFD_ASSERT (elf_flags_init (ibfd)); */
10094 in_flags = elf_elfheader (ibfd)->e_flags;
10095 out_flags = elf_elfheader (obfd)->e_flags;
10097 /* In theory there is no reason why we couldn't handle this. However
10098 in practice it isn't even close to working and there is no real
10099 reason to want it. */
10100 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
10101 && !(ibfd->flags & DYNAMIC)
10102 && (in_flags & EF_ARM_BE8))
10104 _bfd_error_handler (_("error: %B is already in final BE8 format"),
10109 if (!elf_flags_init (obfd))
10111 /* If the input is the default architecture and had the default
10112 flags then do not bother setting the flags for the output
10113 architecture, instead allow future merges to do this. If no
10114 future merges ever set these flags then they will retain their
10115 uninitialised values, which surprise surprise, correspond
10116 to the default values. */
10117 if (bfd_get_arch_info (ibfd)->the_default
10118 && elf_elfheader (ibfd)->e_flags == 0)
10121 elf_flags_init (obfd) = TRUE;
10122 elf_elfheader (obfd)->e_flags = in_flags;
10124 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
10125 && bfd_get_arch_info (obfd)->the_default)
10126 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
10131 /* Determine what should happen if the input ARM architecture
10132 does not match the output ARM architecture. */
10133 if (! bfd_arm_merge_machines (ibfd, obfd))
10136 /* Identical flags must be compatible. */
10137 if (in_flags == out_flags)
10140 /* Check to see if the input BFD actually contains any sections. If
10141 not, its flags may not have been initialised either, but it
10142 cannot actually cause any incompatiblity. Do not short-circuit
10143 dynamic objects; their section list may be emptied by
10144 elf_link_add_object_symbols.
10146 Also check to see if there are no code sections in the input.
10147 In this case there is no need to check for code specific flags.
10148 XXX - do we need to worry about floating-point format compatability
10149 in data sections ? */
10150 if (!(ibfd->flags & DYNAMIC))
10152 bfd_boolean null_input_bfd = TRUE;
10153 bfd_boolean only_data_sections = TRUE;
10155 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
10157 /* Ignore synthetic glue sections. */
10158 if (strcmp (sec->name, ".glue_7")
10159 && strcmp (sec->name, ".glue_7t"))
10161 if ((bfd_get_section_flags (ibfd, sec)
10162 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10163 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10164 only_data_sections = FALSE;
10166 null_input_bfd = FALSE;
10171 if (null_input_bfd || only_data_sections)
10175 /* Complain about various flag mismatches. */
10176 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
10177 EF_ARM_EABI_VERSION (out_flags)))
10180 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
10182 (in_flags & EF_ARM_EABIMASK) >> 24,
10183 (out_flags & EF_ARM_EABIMASK) >> 24);
10187 /* Not sure what needs to be checked for EABI versions >= 1. */
10188 /* VxWorks libraries do not use these flags. */
10189 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
10190 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
10191 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
10193 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10196 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
10198 in_flags & EF_ARM_APCS_26 ? 26 : 32,
10199 out_flags & EF_ARM_APCS_26 ? 26 : 32);
10200 flags_compatible = FALSE;
10203 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10205 if (in_flags & EF_ARM_APCS_FLOAT)
10207 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
10211 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
10214 flags_compatible = FALSE;
10217 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
10219 if (in_flags & EF_ARM_VFP_FLOAT)
10221 (_("error: %B uses VFP instructions, whereas %B does not"),
10225 (_("error: %B uses FPA instructions, whereas %B does not"),
10228 flags_compatible = FALSE;
10231 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
10233 if (in_flags & EF_ARM_MAVERICK_FLOAT)
10235 (_("error: %B uses Maverick instructions, whereas %B does not"),
10239 (_("error: %B does not use Maverick instructions, whereas %B does"),
10242 flags_compatible = FALSE;
10245 #ifdef EF_ARM_SOFT_FLOAT
10246 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
10248 /* We can allow interworking between code that is VFP format
10249 layout, and uses either soft float or integer regs for
10250 passing floating point arguments and results. We already
10251 know that the APCS_FLOAT flags match; similarly for VFP
10253 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
10254 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
10256 if (in_flags & EF_ARM_SOFT_FLOAT)
10258 (_("error: %B uses software FP, whereas %B uses hardware FP"),
10262 (_("error: %B uses hardware FP, whereas %B uses software FP"),
10265 flags_compatible = FALSE;
10270 /* Interworking mismatch is only a warning. */
10271 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10273 if (in_flags & EF_ARM_INTERWORK)
10276 (_("Warning: %B supports interworking, whereas %B does not"),
10282 (_("Warning: %B does not support interworking, whereas %B does"),
10288 return flags_compatible;
10291 /* Display the flags field. */
10294 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10296 FILE * file = (FILE *) ptr;
10297 unsigned long flags;
10299 BFD_ASSERT (abfd != NULL && ptr != NULL);
10301 /* Print normal ELF private data. */
10302 _bfd_elf_print_private_bfd_data (abfd, ptr);
10304 flags = elf_elfheader (abfd)->e_flags;
10305 /* Ignore init flag - it may not be set, despite the flags field
10306 containing valid data. */
10308 /* xgettext:c-format */
10309 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10311 switch (EF_ARM_EABI_VERSION (flags))
10313 case EF_ARM_EABI_UNKNOWN:
10314 /* The following flag bits are GNU extensions and not part of the
10315 official ARM ELF extended ABI. Hence they are only decoded if
10316 the EABI version is not set. */
10317 if (flags & EF_ARM_INTERWORK)
10318 fprintf (file, _(" [interworking enabled]"));
10320 if (flags & EF_ARM_APCS_26)
10321 fprintf (file, " [APCS-26]");
10323 fprintf (file, " [APCS-32]");
10325 if (flags & EF_ARM_VFP_FLOAT)
10326 fprintf (file, _(" [VFP float format]"));
10327 else if (flags & EF_ARM_MAVERICK_FLOAT)
10328 fprintf (file, _(" [Maverick float format]"));
10330 fprintf (file, _(" [FPA float format]"));
10332 if (flags & EF_ARM_APCS_FLOAT)
10333 fprintf (file, _(" [floats passed in float registers]"));
10335 if (flags & EF_ARM_PIC)
10336 fprintf (file, _(" [position independent]"));
10338 if (flags & EF_ARM_NEW_ABI)
10339 fprintf (file, _(" [new ABI]"));
10341 if (flags & EF_ARM_OLD_ABI)
10342 fprintf (file, _(" [old ABI]"));
10344 if (flags & EF_ARM_SOFT_FLOAT)
10345 fprintf (file, _(" [software FP]"));
10347 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10348 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10349 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10350 | EF_ARM_MAVERICK_FLOAT);
10353 case EF_ARM_EABI_VER1:
10354 fprintf (file, _(" [Version1 EABI]"));
10356 if (flags & EF_ARM_SYMSARESORTED)
10357 fprintf (file, _(" [sorted symbol table]"));
10359 fprintf (file, _(" [unsorted symbol table]"));
10361 flags &= ~ EF_ARM_SYMSARESORTED;
10364 case EF_ARM_EABI_VER2:
10365 fprintf (file, _(" [Version2 EABI]"));
10367 if (flags & EF_ARM_SYMSARESORTED)
10368 fprintf (file, _(" [sorted symbol table]"));
10370 fprintf (file, _(" [unsorted symbol table]"));
10372 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10373 fprintf (file, _(" [dynamic symbols use segment index]"));
10375 if (flags & EF_ARM_MAPSYMSFIRST)
10376 fprintf (file, _(" [mapping symbols precede others]"));
10378 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10379 | EF_ARM_MAPSYMSFIRST);
10382 case EF_ARM_EABI_VER3:
10383 fprintf (file, _(" [Version3 EABI]"));
10386 case EF_ARM_EABI_VER4:
10387 fprintf (file, _(" [Version4 EABI]"));
10390 case EF_ARM_EABI_VER5:
10391 fprintf (file, _(" [Version5 EABI]"));
10393 if (flags & EF_ARM_BE8)
10394 fprintf (file, _(" [BE8]"));
10396 if (flags & EF_ARM_LE8)
10397 fprintf (file, _(" [LE8]"));
10399 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10403 fprintf (file, _(" <EABI version unrecognised>"));
10407 flags &= ~ EF_ARM_EABIMASK;
10409 if (flags & EF_ARM_RELEXEC)
10410 fprintf (file, _(" [relocatable executable]"));
10412 if (flags & EF_ARM_HASENTRY)
10413 fprintf (file, _(" [has entry point]"));
10415 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10418 fprintf (file, _("<Unrecognised flag bits set>"));
10420 fputc ('\n', file);
10426 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10428 switch (ELF_ST_TYPE (elf_sym->st_info))
10430 case STT_ARM_TFUNC:
10431 return ELF_ST_TYPE (elf_sym->st_info);
10433 case STT_ARM_16BIT:
10434 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10435 This allows us to distinguish between data used by Thumb instructions
10436 and non-data (which is probably code) inside Thumb regions of an
10438 if (type != STT_OBJECT && type != STT_TLS)
10439 return ELF_ST_TYPE (elf_sym->st_info);
10450 elf32_arm_gc_mark_hook (asection *sec,
10451 struct bfd_link_info *info,
10452 Elf_Internal_Rela *rel,
10453 struct elf_link_hash_entry *h,
10454 Elf_Internal_Sym *sym)
10457 switch (ELF32_R_TYPE (rel->r_info))
10459 case R_ARM_GNU_VTINHERIT:
10460 case R_ARM_GNU_VTENTRY:
10464 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10467 /* Update the got entry reference counts for the section being removed. */
10470 elf32_arm_gc_sweep_hook (bfd * abfd,
10471 struct bfd_link_info * info,
10473 const Elf_Internal_Rela * relocs)
10475 Elf_Internal_Shdr *symtab_hdr;
10476 struct elf_link_hash_entry **sym_hashes;
10477 bfd_signed_vma *local_got_refcounts;
10478 const Elf_Internal_Rela *rel, *relend;
10479 struct elf32_arm_link_hash_table * globals;
10481 if (info->relocatable)
10484 globals = elf32_arm_hash_table (info);
10486 elf_section_data (sec)->local_dynrel = NULL;
10488 symtab_hdr = & elf_symtab_hdr (abfd);
10489 sym_hashes = elf_sym_hashes (abfd);
10490 local_got_refcounts = elf_local_got_refcounts (abfd);
10492 check_use_blx (globals);
10494 relend = relocs + sec->reloc_count;
10495 for (rel = relocs; rel < relend; rel++)
10497 unsigned long r_symndx;
10498 struct elf_link_hash_entry *h = NULL;
10501 r_symndx = ELF32_R_SYM (rel->r_info);
10502 if (r_symndx >= symtab_hdr->sh_info)
10504 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10505 while (h->root.type == bfd_link_hash_indirect
10506 || h->root.type == bfd_link_hash_warning)
10507 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10510 r_type = ELF32_R_TYPE (rel->r_info);
10511 r_type = arm_real_reloc_type (globals, r_type);
10515 case R_ARM_GOT_PREL:
10516 case R_ARM_TLS_GD32:
10517 case R_ARM_TLS_IE32:
10520 if (h->got.refcount > 0)
10521 h->got.refcount -= 1;
10523 else if (local_got_refcounts != NULL)
10525 if (local_got_refcounts[r_symndx] > 0)
10526 local_got_refcounts[r_symndx] -= 1;
10530 case R_ARM_TLS_LDM32:
10531 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10535 case R_ARM_ABS32_NOI:
10537 case R_ARM_REL32_NOI:
10543 case R_ARM_THM_CALL:
10544 case R_ARM_THM_JUMP24:
10545 case R_ARM_THM_JUMP19:
10546 case R_ARM_MOVW_ABS_NC:
10547 case R_ARM_MOVT_ABS:
10548 case R_ARM_MOVW_PREL_NC:
10549 case R_ARM_MOVT_PREL:
10550 case R_ARM_THM_MOVW_ABS_NC:
10551 case R_ARM_THM_MOVT_ABS:
10552 case R_ARM_THM_MOVW_PREL_NC:
10553 case R_ARM_THM_MOVT_PREL:
10554 /* Should the interworking branches be here also? */
10558 struct elf32_arm_link_hash_entry *eh;
10559 struct elf32_arm_relocs_copied **pp;
10560 struct elf32_arm_relocs_copied *p;
10562 eh = (struct elf32_arm_link_hash_entry *) h;
10564 if (h->plt.refcount > 0)
10566 h->plt.refcount -= 1;
10567 if (r_type == R_ARM_THM_CALL)
10568 eh->plt_maybe_thumb_refcount--;
10570 if (r_type == R_ARM_THM_JUMP24
10571 || r_type == R_ARM_THM_JUMP19)
10572 eh->plt_thumb_refcount--;
10575 if (r_type == R_ARM_ABS32
10576 || r_type == R_ARM_REL32
10577 || r_type == R_ARM_ABS32_NOI
10578 || r_type == R_ARM_REL32_NOI)
10580 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10582 if (p->section == sec)
10585 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10586 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10604 /* Look through the relocs for a section during the first phase. */
10607 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10608 asection *sec, const Elf_Internal_Rela *relocs)
10610 Elf_Internal_Shdr *symtab_hdr;
10611 struct elf_link_hash_entry **sym_hashes;
10612 const Elf_Internal_Rela *rel;
10613 const Elf_Internal_Rela *rel_end;
10616 bfd_vma *local_got_offsets;
10617 struct elf32_arm_link_hash_table *htab;
10618 bfd_boolean needs_plt;
10619 unsigned long nsyms;
10621 if (info->relocatable)
10624 BFD_ASSERT (is_arm_elf (abfd));
10626 htab = elf32_arm_hash_table (info);
10629 /* Create dynamic sections for relocatable executables so that we can
10630 copy relocations. */
10631 if (htab->root.is_relocatable_executable
10632 && ! htab->root.dynamic_sections_created)
10634 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10638 dynobj = elf_hash_table (info)->dynobj;
10639 local_got_offsets = elf_local_got_offsets (abfd);
10641 symtab_hdr = & elf_symtab_hdr (abfd);
10642 sym_hashes = elf_sym_hashes (abfd);
10643 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10645 rel_end = relocs + sec->reloc_count;
10646 for (rel = relocs; rel < rel_end; rel++)
10648 struct elf_link_hash_entry *h;
10649 struct elf32_arm_link_hash_entry *eh;
10650 unsigned long r_symndx;
10653 r_symndx = ELF32_R_SYM (rel->r_info);
10654 r_type = ELF32_R_TYPE (rel->r_info);
10655 r_type = arm_real_reloc_type (htab, r_type);
10657 if (r_symndx >= nsyms
10658 /* PR 9934: It is possible to have relocations that do not
10659 refer to symbols, thus it is also possible to have an
10660 object file containing relocations but no symbol table. */
10661 && (r_symndx > 0 || nsyms > 0))
10663 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10668 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10672 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10673 while (h->root.type == bfd_link_hash_indirect
10674 || h->root.type == bfd_link_hash_warning)
10675 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10678 eh = (struct elf32_arm_link_hash_entry *) h;
10683 case R_ARM_GOT_PREL:
10684 case R_ARM_TLS_GD32:
10685 case R_ARM_TLS_IE32:
10686 /* This symbol requires a global offset table entry. */
10688 int tls_type, old_tls_type;
10692 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10693 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10694 default: tls_type = GOT_NORMAL; break;
10700 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10704 bfd_signed_vma *local_got_refcounts;
10706 /* This is a global offset table entry for a local symbol. */
10707 local_got_refcounts = elf_local_got_refcounts (abfd);
10708 if (local_got_refcounts == NULL)
10710 bfd_size_type size;
10712 size = symtab_hdr->sh_info;
10713 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10714 local_got_refcounts = bfd_zalloc (abfd, size);
10715 if (local_got_refcounts == NULL)
10717 elf_local_got_refcounts (abfd) = local_got_refcounts;
10718 elf32_arm_local_got_tls_type (abfd)
10719 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10721 local_got_refcounts[r_symndx] += 1;
10722 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10725 /* We will already have issued an error message if there is a
10726 TLS / non-TLS mismatch, based on the symbol type. We don't
10727 support any linker relaxations. So just combine any TLS
10729 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10730 && tls_type != GOT_NORMAL)
10731 tls_type |= old_tls_type;
10733 if (old_tls_type != tls_type)
10736 elf32_arm_hash_entry (h)->tls_type = tls_type;
10738 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10741 /* Fall through. */
10743 case R_ARM_TLS_LDM32:
10744 if (r_type == R_ARM_TLS_LDM32)
10745 htab->tls_ldm_got.refcount++;
10746 /* Fall through. */
10748 case R_ARM_GOTOFF32:
10750 if (htab->sgot == NULL)
10752 if (htab->root.dynobj == NULL)
10753 htab->root.dynobj = abfd;
10754 if (!create_got_section (htab->root.dynobj, info))
10760 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10761 ldr __GOTT_INDEX__ offsets. */
10762 if (!htab->vxworks_p)
10764 /* Fall through. */
10771 case R_ARM_THM_CALL:
10772 case R_ARM_THM_JUMP24:
10773 case R_ARM_THM_JUMP19:
10777 case R_ARM_MOVW_ABS_NC:
10778 case R_ARM_MOVT_ABS:
10779 case R_ARM_THM_MOVW_ABS_NC:
10780 case R_ARM_THM_MOVT_ABS:
10783 (*_bfd_error_handler)
10784 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10785 abfd, elf32_arm_howto_table_1[r_type].name,
10786 (h) ? h->root.root.string : "a local symbol");
10787 bfd_set_error (bfd_error_bad_value);
10791 /* Fall through. */
10793 case R_ARM_ABS32_NOI:
10795 case R_ARM_REL32_NOI:
10796 case R_ARM_MOVW_PREL_NC:
10797 case R_ARM_MOVT_PREL:
10798 case R_ARM_THM_MOVW_PREL_NC:
10799 case R_ARM_THM_MOVT_PREL:
10803 /* Should the interworking branches be listed here? */
10806 /* If this reloc is in a read-only section, we might
10807 need a copy reloc. We can't check reliably at this
10808 stage whether the section is read-only, as input
10809 sections have not yet been mapped to output sections.
10810 Tentatively set the flag for now, and correct in
10811 adjust_dynamic_symbol. */
10813 h->non_got_ref = 1;
10815 /* We may need a .plt entry if the function this reloc
10816 refers to is in a different object. We can't tell for
10817 sure yet, because something later might force the
10822 /* If we create a PLT entry, this relocation will reference
10823 it, even if it's an ABS32 relocation. */
10824 h->plt.refcount += 1;
10826 /* It's too early to use htab->use_blx here, so we have to
10827 record possible blx references separately from
10828 relocs that definitely need a thumb stub. */
10830 if (r_type == R_ARM_THM_CALL)
10831 eh->plt_maybe_thumb_refcount += 1;
10833 if (r_type == R_ARM_THM_JUMP24
10834 || r_type == R_ARM_THM_JUMP19)
10835 eh->plt_thumb_refcount += 1;
10838 /* If we are creating a shared library or relocatable executable,
10839 and this is a reloc against a global symbol, or a non PC
10840 relative reloc against a local symbol, then we need to copy
10841 the reloc into the shared library. However, if we are linking
10842 with -Bsymbolic, we do not need to copy a reloc against a
10843 global symbol which is defined in an object we are
10844 including in the link (i.e., DEF_REGULAR is set). At
10845 this point we have not seen all the input files, so it is
10846 possible that DEF_REGULAR is not set now but will be set
10847 later (it is never cleared). We account for that
10848 possibility below by storing information in the
10849 relocs_copied field of the hash table entry. */
10850 if ((info->shared || htab->root.is_relocatable_executable)
10851 && (sec->flags & SEC_ALLOC) != 0
10852 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10853 || (h != NULL && ! h->needs_plt
10854 && (! info->symbolic || ! h->def_regular))))
10856 struct elf32_arm_relocs_copied *p, **head;
10858 /* When creating a shared object, we must copy these
10859 reloc types into the output file. We create a reloc
10860 section in dynobj and make room for this reloc. */
10861 if (sreloc == NULL)
10863 sreloc = _bfd_elf_make_dynamic_reloc_section
10864 (sec, dynobj, 2, abfd, ! htab->use_rel);
10866 if (sreloc == NULL)
10869 /* BPABI objects never have dynamic relocations mapped. */
10870 if (htab->symbian_p)
10874 flags = bfd_get_section_flags (dynobj, sreloc);
10875 flags &= ~(SEC_LOAD | SEC_ALLOC);
10876 bfd_set_section_flags (dynobj, sreloc, flags);
10880 /* If this is a global symbol, we count the number of
10881 relocations we need for this symbol. */
10884 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10888 /* Track dynamic relocs needed for local syms too.
10889 We really need local syms available to do this
10890 easily. Oh well. */
10893 Elf_Internal_Sym *isym;
10895 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
10900 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
10904 vpp = &elf_section_data (s)->local_dynrel;
10905 head = (struct elf32_arm_relocs_copied **) vpp;
10909 if (p == NULL || p->section != sec)
10911 bfd_size_type amt = sizeof *p;
10913 p = bfd_alloc (htab->root.dynobj, amt);
10923 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10929 /* This relocation describes the C++ object vtable hierarchy.
10930 Reconstruct it for later use during GC. */
10931 case R_ARM_GNU_VTINHERIT:
10932 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10936 /* This relocation describes which C++ vtable entries are actually
10937 used. Record for later use during GC. */
10938 case R_ARM_GNU_VTENTRY:
10939 BFD_ASSERT (h != NULL);
10941 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10950 /* Unwinding tables are not referenced directly. This pass marks them as
10951 required if the corresponding code section is marked. */
10954 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10955 elf_gc_mark_hook_fn gc_mark_hook)
10958 Elf_Internal_Shdr **elf_shdrp;
10961 /* Marking EH data may cause additional code sections to be marked,
10962 requiring multiple passes. */
10967 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10971 if (! is_arm_elf (sub))
10974 elf_shdrp = elf_elfsections (sub);
10975 for (o = sub->sections; o != NULL; o = o->next)
10977 Elf_Internal_Shdr *hdr;
10979 hdr = &elf_section_data (o)->this_hdr;
10980 if (hdr->sh_type == SHT_ARM_EXIDX
10982 && hdr->sh_link < elf_numsections (sub)
10984 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10987 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
10997 /* Treat mapping symbols as special target symbols. */
11000 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11002 return bfd_is_arm_special_symbol_name (sym->name,
11003 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11006 /* This is a copy of elf_find_function() from elf.c except that
11007 ARM mapping symbols are ignored when looking for function names
11008 and STT_ARM_TFUNC is considered to a function type. */
11011 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11012 asection * section,
11013 asymbol ** symbols,
11015 const char ** filename_ptr,
11016 const char ** functionname_ptr)
11018 const char * filename = NULL;
11019 asymbol * func = NULL;
11020 bfd_vma low_func = 0;
11023 for (p = symbols; *p != NULL; p++)
11025 elf_symbol_type *q;
11027 q = (elf_symbol_type *) *p;
11029 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11034 filename = bfd_asymbol_name (&q->symbol);
11037 case STT_ARM_TFUNC:
11039 /* Skip mapping symbols. */
11040 if ((q->symbol.flags & BSF_LOCAL)
11041 && bfd_is_arm_special_symbol_name (q->symbol.name,
11042 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11044 /* Fall through. */
11045 if (bfd_get_section (&q->symbol) == section
11046 && q->symbol.value >= low_func
11047 && q->symbol.value <= offset)
11049 func = (asymbol *) q;
11050 low_func = q->symbol.value;
11060 *filename_ptr = filename;
11061 if (functionname_ptr)
11062 *functionname_ptr = bfd_asymbol_name (func);
11068 /* Find the nearest line to a particular section and offset, for error
11069 reporting. This code is a duplicate of the code in elf.c, except
11070 that it uses arm_elf_find_function. */
11073 elf32_arm_find_nearest_line (bfd * abfd,
11074 asection * section,
11075 asymbol ** symbols,
11077 const char ** filename_ptr,
11078 const char ** functionname_ptr,
11079 unsigned int * line_ptr)
11081 bfd_boolean found = FALSE;
11083 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11085 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11086 filename_ptr, functionname_ptr,
11088 & elf_tdata (abfd)->dwarf2_find_line_info))
11090 if (!*functionname_ptr)
11091 arm_elf_find_function (abfd, section, symbols, offset,
11092 *filename_ptr ? NULL : filename_ptr,
11098 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11099 & found, filename_ptr,
11100 functionname_ptr, line_ptr,
11101 & elf_tdata (abfd)->line_info))
11104 if (found && (*functionname_ptr || *line_ptr))
11107 if (symbols == NULL)
11110 if (! arm_elf_find_function (abfd, section, symbols, offset,
11111 filename_ptr, functionname_ptr))
11119 elf32_arm_find_inliner_info (bfd * abfd,
11120 const char ** filename_ptr,
11121 const char ** functionname_ptr,
11122 unsigned int * line_ptr)
11125 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11126 functionname_ptr, line_ptr,
11127 & elf_tdata (abfd)->dwarf2_find_line_info);
11131 /* Adjust a symbol defined by a dynamic object and referenced by a
11132 regular object. The current definition is in some section of the
11133 dynamic object, but we're not including those sections. We have to
11134 change the definition to something the rest of the link can
11138 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11139 struct elf_link_hash_entry * h)
11143 struct elf32_arm_link_hash_entry * eh;
11144 struct elf32_arm_link_hash_table *globals;
11146 globals = elf32_arm_hash_table (info);
11147 dynobj = elf_hash_table (info)->dynobj;
11149 /* Make sure we know what is going on here. */
11150 BFD_ASSERT (dynobj != NULL
11152 || h->u.weakdef != NULL
11155 && !h->def_regular)));
11157 eh = (struct elf32_arm_link_hash_entry *) h;
11159 /* If this is a function, put it in the procedure linkage table. We
11160 will fill in the contents of the procedure linkage table later,
11161 when we know the address of the .got section. */
11162 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11165 if (h->plt.refcount <= 0
11166 || SYMBOL_CALLS_LOCAL (info, h)
11167 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11168 && h->root.type == bfd_link_hash_undefweak))
11170 /* This case can occur if we saw a PLT32 reloc in an input
11171 file, but the symbol was never referred to by a dynamic
11172 object, or if all references were garbage collected. In
11173 such a case, we don't actually need to build a procedure
11174 linkage table, and we can just do a PC24 reloc instead. */
11175 h->plt.offset = (bfd_vma) -1;
11176 eh->plt_thumb_refcount = 0;
11177 eh->plt_maybe_thumb_refcount = 0;
11185 /* It's possible that we incorrectly decided a .plt reloc was
11186 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11187 in check_relocs. We can't decide accurately between function
11188 and non-function syms in check-relocs; Objects loaded later in
11189 the link may change h->type. So fix it now. */
11190 h->plt.offset = (bfd_vma) -1;
11191 eh->plt_thumb_refcount = 0;
11192 eh->plt_maybe_thumb_refcount = 0;
11195 /* If this is a weak symbol, and there is a real definition, the
11196 processor independent code will have arranged for us to see the
11197 real definition first, and we can just use the same value. */
11198 if (h->u.weakdef != NULL)
11200 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11201 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11202 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11203 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11207 /* If there are no non-GOT references, we do not need a copy
11209 if (!h->non_got_ref)
11212 /* This is a reference to a symbol defined by a dynamic object which
11213 is not a function. */
11215 /* If we are creating a shared library, we must presume that the
11216 only references to the symbol are via the global offset table.
11217 For such cases we need not do anything here; the relocations will
11218 be handled correctly by relocate_section. Relocatable executables
11219 can reference data in shared objects directly, so we don't need to
11220 do anything here. */
11221 if (info->shared || globals->root.is_relocatable_executable)
11226 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11227 h->root.root.string);
11231 /* We must allocate the symbol in our .dynbss section, which will
11232 become part of the .bss section of the executable. There will be
11233 an entry for this symbol in the .dynsym section. The dynamic
11234 object will contain position independent code, so all references
11235 from the dynamic object to this symbol will go through the global
11236 offset table. The dynamic linker will use the .dynsym entry to
11237 determine the address it must put in the global offset table, so
11238 both the dynamic object and the regular object will refer to the
11239 same memory location for the variable. */
11240 s = bfd_get_section_by_name (dynobj, ".dynbss");
11241 BFD_ASSERT (s != NULL);
11243 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11244 copy the initial value out of the dynamic object and into the
11245 runtime process image. We need to remember the offset into the
11246 .rel(a).bss section we are going to use. */
11247 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11251 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11252 BFD_ASSERT (srel != NULL);
11253 srel->size += RELOC_SIZE (globals);
11257 return _bfd_elf_adjust_dynamic_copy (h, s);
11260 /* Allocate space in .plt, .got and associated reloc sections for
11264 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11266 struct bfd_link_info *info;
11267 struct elf32_arm_link_hash_table *htab;
11268 struct elf32_arm_link_hash_entry *eh;
11269 struct elf32_arm_relocs_copied *p;
11270 bfd_signed_vma thumb_refs;
11272 eh = (struct elf32_arm_link_hash_entry *) h;
11274 if (h->root.type == bfd_link_hash_indirect)
11277 if (h->root.type == bfd_link_hash_warning)
11278 /* When warning symbols are created, they **replace** the "real"
11279 entry in the hash table, thus we never get to see the real
11280 symbol in a hash traversal. So look at it now. */
11281 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11283 info = (struct bfd_link_info *) inf;
11284 htab = elf32_arm_hash_table (info);
11286 if (htab->root.dynamic_sections_created
11287 && h->plt.refcount > 0)
11289 /* Make sure this symbol is output as a dynamic symbol.
11290 Undefined weak syms won't yet be marked as dynamic. */
11291 if (h->dynindx == -1
11292 && !h->forced_local)
11294 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11299 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11301 asection *s = htab->splt;
11303 /* If this is the first .plt entry, make room for the special
11306 s->size += htab->plt_header_size;
11308 h->plt.offset = s->size;
11310 /* If we will insert a Thumb trampoline before this PLT, leave room
11312 thumb_refs = eh->plt_thumb_refcount;
11313 if (!htab->use_blx)
11314 thumb_refs += eh->plt_maybe_thumb_refcount;
11316 if (thumb_refs > 0)
11318 h->plt.offset += PLT_THUMB_STUB_SIZE;
11319 s->size += PLT_THUMB_STUB_SIZE;
11322 /* If this symbol is not defined in a regular file, and we are
11323 not generating a shared library, then set the symbol to this
11324 location in the .plt. This is required to make function
11325 pointers compare as equal between the normal executable and
11326 the shared library. */
11328 && !h->def_regular)
11330 h->root.u.def.section = s;
11331 h->root.u.def.value = h->plt.offset;
11334 /* Make sure the function is not marked as Thumb, in case
11335 it is the target of an ABS32 relocation, which will
11336 point to the PLT entry. */
11337 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11338 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11340 /* Make room for this entry. */
11341 s->size += htab->plt_entry_size;
11343 if (!htab->symbian_p)
11345 /* We also need to make an entry in the .got.plt section, which
11346 will be placed in the .got section by the linker script. */
11347 eh->plt_got_offset = htab->sgotplt->size;
11348 htab->sgotplt->size += 4;
11351 /* We also need to make an entry in the .rel(a).plt section. */
11352 htab->srelplt->size += RELOC_SIZE (htab);
11354 /* VxWorks executables have a second set of relocations for
11355 each PLT entry. They go in a separate relocation section,
11356 which is processed by the kernel loader. */
11357 if (htab->vxworks_p && !info->shared)
11359 /* There is a relocation for the initial PLT entry:
11360 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11361 if (h->plt.offset == htab->plt_header_size)
11362 htab->srelplt2->size += RELOC_SIZE (htab);
11364 /* There are two extra relocations for each subsequent
11365 PLT entry: an R_ARM_32 relocation for the GOT entry,
11366 and an R_ARM_32 relocation for the PLT entry. */
11367 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11372 h->plt.offset = (bfd_vma) -1;
11378 h->plt.offset = (bfd_vma) -1;
11382 if (h->got.refcount > 0)
11386 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11389 /* Make sure this symbol is output as a dynamic symbol.
11390 Undefined weak syms won't yet be marked as dynamic. */
11391 if (h->dynindx == -1
11392 && !h->forced_local)
11394 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11398 if (!htab->symbian_p)
11401 h->got.offset = s->size;
11403 if (tls_type == GOT_UNKNOWN)
11406 if (tls_type == GOT_NORMAL)
11407 /* Non-TLS symbols need one GOT slot. */
11411 if (tls_type & GOT_TLS_GD)
11412 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11414 if (tls_type & GOT_TLS_IE)
11415 /* R_ARM_TLS_IE32 needs one GOT slot. */
11419 dyn = htab->root.dynamic_sections_created;
11422 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11424 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11427 if (tls_type != GOT_NORMAL
11428 && (info->shared || indx != 0)
11429 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11430 || h->root.type != bfd_link_hash_undefweak))
11432 if (tls_type & GOT_TLS_IE)
11433 htab->srelgot->size += RELOC_SIZE (htab);
11435 if (tls_type & GOT_TLS_GD)
11436 htab->srelgot->size += RELOC_SIZE (htab);
11438 if ((tls_type & GOT_TLS_GD) && indx != 0)
11439 htab->srelgot->size += RELOC_SIZE (htab);
11441 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11442 || h->root.type != bfd_link_hash_undefweak)
11444 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11445 htab->srelgot->size += RELOC_SIZE (htab);
11449 h->got.offset = (bfd_vma) -1;
11451 /* Allocate stubs for exported Thumb functions on v4t. */
11452 if (!htab->use_blx && h->dynindx != -1
11454 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11455 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11457 struct elf_link_hash_entry * th;
11458 struct bfd_link_hash_entry * bh;
11459 struct elf_link_hash_entry * myh;
11463 /* Create a new symbol to regist the real location of the function. */
11464 s = h->root.u.def.section;
11465 sprintf (name, "__real_%s", h->root.root.string);
11466 _bfd_generic_link_add_one_symbol (info, s->owner,
11467 name, BSF_GLOBAL, s,
11468 h->root.u.def.value,
11469 NULL, TRUE, FALSE, &bh);
11471 myh = (struct elf_link_hash_entry *) bh;
11472 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11473 myh->forced_local = 1;
11474 eh->export_glue = myh;
11475 th = record_arm_to_thumb_glue (info, h);
11476 /* Point the symbol at the stub. */
11477 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11478 h->root.u.def.section = th->root.u.def.section;
11479 h->root.u.def.value = th->root.u.def.value & ~1;
11482 if (eh->relocs_copied == NULL)
11485 /* In the shared -Bsymbolic case, discard space allocated for
11486 dynamic pc-relative relocs against symbols which turn out to be
11487 defined in regular objects. For the normal shared case, discard
11488 space for pc-relative relocs that have become local due to symbol
11489 visibility changes. */
11491 if (info->shared || htab->root.is_relocatable_executable)
11493 /* The only relocs that use pc_count are R_ARM_REL32 and
11494 R_ARM_REL32_NOI, which will appear on something like
11495 ".long foo - .". We want calls to protected symbols to resolve
11496 directly to the function rather than going via the plt. If people
11497 want function pointer comparisons to work as expected then they
11498 should avoid writing assembly like ".long foo - .". */
11499 if (SYMBOL_CALLS_LOCAL (info, h))
11501 struct elf32_arm_relocs_copied **pp;
11503 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11505 p->count -= p->pc_count;
11514 if (elf32_arm_hash_table (info)->vxworks_p)
11516 struct elf32_arm_relocs_copied **pp;
11518 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11520 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11527 /* Also discard relocs on undefined weak syms with non-default
11529 if (eh->relocs_copied != NULL
11530 && h->root.type == bfd_link_hash_undefweak)
11532 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11533 eh->relocs_copied = NULL;
11535 /* Make sure undefined weak symbols are output as a dynamic
11537 else if (h->dynindx == -1
11538 && !h->forced_local)
11540 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11545 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11546 && h->root.type == bfd_link_hash_new)
11548 /* Output absolute symbols so that we can create relocations
11549 against them. For normal symbols we output a relocation
11550 against the section that contains them. */
11551 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11558 /* For the non-shared case, discard space for relocs against
11559 symbols which turn out to need copy relocs or are not
11562 if (!h->non_got_ref
11563 && ((h->def_dynamic
11564 && !h->def_regular)
11565 || (htab->root.dynamic_sections_created
11566 && (h->root.type == bfd_link_hash_undefweak
11567 || h->root.type == bfd_link_hash_undefined))))
11569 /* Make sure this symbol is output as a dynamic symbol.
11570 Undefined weak syms won't yet be marked as dynamic. */
11571 if (h->dynindx == -1
11572 && !h->forced_local)
11574 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11578 /* If that succeeded, we know we'll be keeping all the
11580 if (h->dynindx != -1)
11584 eh->relocs_copied = NULL;
11589 /* Finally, allocate space. */
11590 for (p = eh->relocs_copied; p != NULL; p = p->next)
11592 asection *sreloc = elf_section_data (p->section)->sreloc;
11593 sreloc->size += p->count * RELOC_SIZE (htab);
11599 /* Find any dynamic relocs that apply to read-only sections. */
11602 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11604 struct elf32_arm_link_hash_entry * eh;
11605 struct elf32_arm_relocs_copied * p;
11607 if (h->root.type == bfd_link_hash_warning)
11608 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11610 eh = (struct elf32_arm_link_hash_entry *) h;
11611 for (p = eh->relocs_copied; p != NULL; p = p->next)
11613 asection *s = p->section;
11615 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11617 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11619 info->flags |= DF_TEXTREL;
11621 /* Not an error, just cut short the traversal. */
11629 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11632 struct elf32_arm_link_hash_table *globals;
11634 globals = elf32_arm_hash_table (info);
11635 globals->byteswap_code = byteswap_code;
11638 /* Set the sizes of the dynamic sections. */
11641 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11642 struct bfd_link_info * info)
11647 bfd_boolean relocs;
11649 struct elf32_arm_link_hash_table *htab;
11651 htab = elf32_arm_hash_table (info);
11652 dynobj = elf_hash_table (info)->dynobj;
11653 BFD_ASSERT (dynobj != NULL);
11654 check_use_blx (htab);
11656 if (elf_hash_table (info)->dynamic_sections_created)
11658 /* Set the contents of the .interp section to the interpreter. */
11659 if (info->executable)
11661 s = bfd_get_section_by_name (dynobj, ".interp");
11662 BFD_ASSERT (s != NULL);
11663 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11664 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11668 /* Set up .got offsets for local syms, and space for local dynamic
11670 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11672 bfd_signed_vma *local_got;
11673 bfd_signed_vma *end_local_got;
11674 char *local_tls_type;
11675 bfd_size_type locsymcount;
11676 Elf_Internal_Shdr *symtab_hdr;
11678 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11680 if (! is_arm_elf (ibfd))
11683 for (s = ibfd->sections; s != NULL; s = s->next)
11685 struct elf32_arm_relocs_copied *p;
11687 for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11689 if (!bfd_is_abs_section (p->section)
11690 && bfd_is_abs_section (p->section->output_section))
11692 /* Input section has been discarded, either because
11693 it is a copy of a linkonce section or due to
11694 linker script /DISCARD/, so we'll be discarding
11697 else if (is_vxworks
11698 && strcmp (p->section->output_section->name,
11701 /* Relocations in vxworks .tls_vars sections are
11702 handled specially by the loader. */
11704 else if (p->count != 0)
11706 srel = elf_section_data (p->section)->sreloc;
11707 srel->size += p->count * RELOC_SIZE (htab);
11708 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11709 info->flags |= DF_TEXTREL;
11714 local_got = elf_local_got_refcounts (ibfd);
11718 symtab_hdr = & elf_symtab_hdr (ibfd);
11719 locsymcount = symtab_hdr->sh_info;
11720 end_local_got = local_got + locsymcount;
11721 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11723 srel = htab->srelgot;
11724 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11726 if (*local_got > 0)
11728 *local_got = s->size;
11729 if (*local_tls_type & GOT_TLS_GD)
11730 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11732 if (*local_tls_type & GOT_TLS_IE)
11734 if (*local_tls_type == GOT_NORMAL)
11737 if (info->shared || *local_tls_type == GOT_TLS_GD)
11738 srel->size += RELOC_SIZE (htab);
11741 *local_got = (bfd_vma) -1;
11745 if (htab->tls_ldm_got.refcount > 0)
11747 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11748 for R_ARM_TLS_LDM32 relocations. */
11749 htab->tls_ldm_got.offset = htab->sgot->size;
11750 htab->sgot->size += 8;
11752 htab->srelgot->size += RELOC_SIZE (htab);
11755 htab->tls_ldm_got.offset = -1;
11757 /* Allocate global sym .plt and .got entries, and space for global
11758 sym dynamic relocs. */
11759 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11761 /* Here we rummage through the found bfds to collect glue information. */
11762 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11764 if (! is_arm_elf (ibfd))
11767 /* Initialise mapping tables for code/data. */
11768 bfd_elf32_arm_init_maps (ibfd);
11770 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11771 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11772 /* xgettext:c-format */
11773 _bfd_error_handler (_("Errors encountered processing file %s"),
11777 /* Allocate space for the glue sections now that we've sized them. */
11778 bfd_elf32_arm_allocate_interworking_sections (info);
11780 /* The check_relocs and adjust_dynamic_symbol entry points have
11781 determined the sizes of the various dynamic sections. Allocate
11782 memory for them. */
11785 for (s = dynobj->sections; s != NULL; s = s->next)
11789 if ((s->flags & SEC_LINKER_CREATED) == 0)
11792 /* It's OK to base decisions on the section name, because none
11793 of the dynobj section names depend upon the input files. */
11794 name = bfd_get_section_name (dynobj, s);
11796 if (strcmp (name, ".plt") == 0)
11798 /* Remember whether there is a PLT. */
11799 plt = s->size != 0;
11801 else if (CONST_STRNEQ (name, ".rel"))
11805 /* Remember whether there are any reloc sections other
11806 than .rel(a).plt and .rela.plt.unloaded. */
11807 if (s != htab->srelplt && s != htab->srelplt2)
11810 /* We use the reloc_count field as a counter if we need
11811 to copy relocs into the output file. */
11812 s->reloc_count = 0;
11815 else if (! CONST_STRNEQ (name, ".got")
11816 && strcmp (name, ".dynbss") != 0)
11818 /* It's not one of our sections, so don't allocate space. */
11824 /* If we don't need this section, strip it from the
11825 output file. This is mostly to handle .rel(a).bss and
11826 .rel(a).plt. We must create both sections in
11827 create_dynamic_sections, because they must be created
11828 before the linker maps input sections to output
11829 sections. The linker does that before
11830 adjust_dynamic_symbol is called, and it is that
11831 function which decides whether anything needs to go
11832 into these sections. */
11833 s->flags |= SEC_EXCLUDE;
11837 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11840 /* Allocate memory for the section contents. */
11841 s->contents = bfd_zalloc (dynobj, s->size);
11842 if (s->contents == NULL)
11846 if (elf_hash_table (info)->dynamic_sections_created)
11848 /* Add some entries to the .dynamic section. We fill in the
11849 values later, in elf32_arm_finish_dynamic_sections, but we
11850 must add the entries now so that we get the correct size for
11851 the .dynamic section. The DT_DEBUG entry is filled in by the
11852 dynamic linker and used by the debugger. */
11853 #define add_dynamic_entry(TAG, VAL) \
11854 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11856 if (info->executable)
11858 if (!add_dynamic_entry (DT_DEBUG, 0))
11864 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11865 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11866 || !add_dynamic_entry (DT_PLTREL,
11867 htab->use_rel ? DT_REL : DT_RELA)
11868 || !add_dynamic_entry (DT_JMPREL, 0))
11876 if (!add_dynamic_entry (DT_REL, 0)
11877 || !add_dynamic_entry (DT_RELSZ, 0)
11878 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11883 if (!add_dynamic_entry (DT_RELA, 0)
11884 || !add_dynamic_entry (DT_RELASZ, 0)
11885 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11890 /* If any dynamic relocs apply to a read-only section,
11891 then we need a DT_TEXTREL entry. */
11892 if ((info->flags & DF_TEXTREL) == 0)
11893 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11896 if ((info->flags & DF_TEXTREL) != 0)
11898 if (!add_dynamic_entry (DT_TEXTREL, 0))
11901 if (htab->vxworks_p
11902 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11905 #undef add_dynamic_entry
11910 /* Finish up dynamic symbol handling. We set the contents of various
11911 dynamic sections here. */
11914 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11915 struct bfd_link_info * info,
11916 struct elf_link_hash_entry * h,
11917 Elf_Internal_Sym * sym)
11920 struct elf32_arm_link_hash_table *htab;
11921 struct elf32_arm_link_hash_entry *eh;
11923 dynobj = elf_hash_table (info)->dynobj;
11924 htab = elf32_arm_hash_table (info);
11925 eh = (struct elf32_arm_link_hash_entry *) h;
11927 if (h->plt.offset != (bfd_vma) -1)
11933 Elf_Internal_Rela rel;
11935 /* This symbol has an entry in the procedure linkage table. Set
11938 BFD_ASSERT (h->dynindx != -1);
11940 splt = bfd_get_section_by_name (dynobj, ".plt");
11941 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11942 BFD_ASSERT (splt != NULL && srel != NULL);
11944 /* Fill in the entry in the procedure linkage table. */
11945 if (htab->symbian_p)
11947 put_arm_insn (htab, output_bfd,
11948 elf32_arm_symbian_plt_entry[0],
11949 splt->contents + h->plt.offset);
11950 bfd_put_32 (output_bfd,
11951 elf32_arm_symbian_plt_entry[1],
11952 splt->contents + h->plt.offset + 4);
11954 /* Fill in the entry in the .rel.plt section. */
11955 rel.r_offset = (splt->output_section->vma
11956 + splt->output_offset
11957 + h->plt.offset + 4);
11958 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11960 /* Get the index in the procedure linkage table which
11961 corresponds to this symbol. This is the index of this symbol
11962 in all the symbols for which we are making plt entries. The
11963 first entry in the procedure linkage table is reserved. */
11964 plt_index = ((h->plt.offset - htab->plt_header_size)
11965 / htab->plt_entry_size);
11969 bfd_vma got_offset, got_address, plt_address;
11970 bfd_vma got_displacement;
11974 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
11975 BFD_ASSERT (sgot != NULL);
11977 /* Get the offset into the .got.plt table of the entry that
11978 corresponds to this function. */
11979 got_offset = eh->plt_got_offset;
11981 /* Get the index in the procedure linkage table which
11982 corresponds to this symbol. This is the index of this symbol
11983 in all the symbols for which we are making plt entries. The
11984 first three entries in .got.plt are reserved; after that
11985 symbols appear in the same order as in .plt. */
11986 plt_index = (got_offset - 12) / 4;
11988 /* Calculate the address of the GOT entry. */
11989 got_address = (sgot->output_section->vma
11990 + sgot->output_offset
11993 /* ...and the address of the PLT entry. */
11994 plt_address = (splt->output_section->vma
11995 + splt->output_offset
11998 ptr = htab->splt->contents + h->plt.offset;
11999 if (htab->vxworks_p && info->shared)
12004 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12006 val = elf32_arm_vxworks_shared_plt_entry[i];
12008 val |= got_address - sgot->output_section->vma;
12010 val |= plt_index * RELOC_SIZE (htab);
12011 if (i == 2 || i == 5)
12012 bfd_put_32 (output_bfd, val, ptr);
12014 put_arm_insn (htab, output_bfd, val, ptr);
12017 else if (htab->vxworks_p)
12022 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12024 val = elf32_arm_vxworks_exec_plt_entry[i];
12026 val |= got_address;
12028 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12030 val |= plt_index * RELOC_SIZE (htab);
12031 if (i == 2 || i == 5)
12032 bfd_put_32 (output_bfd, val, ptr);
12034 put_arm_insn (htab, output_bfd, val, ptr);
12037 loc = (htab->srelplt2->contents
12038 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12040 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12041 referencing the GOT for this PLT entry. */
12042 rel.r_offset = plt_address + 8;
12043 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12044 rel.r_addend = got_offset;
12045 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12046 loc += RELOC_SIZE (htab);
12048 /* Create the R_ARM_ABS32 relocation referencing the
12049 beginning of the PLT for this GOT entry. */
12050 rel.r_offset = got_address;
12051 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12053 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12057 bfd_signed_vma thumb_refs;
12058 /* Calculate the displacement between the PLT slot and the
12059 entry in the GOT. The eight-byte offset accounts for the
12060 value produced by adding to pc in the first instruction
12061 of the PLT stub. */
12062 got_displacement = got_address - (plt_address + 8);
12064 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12066 thumb_refs = eh->plt_thumb_refcount;
12067 if (!htab->use_blx)
12068 thumb_refs += eh->plt_maybe_thumb_refcount;
12070 if (thumb_refs > 0)
12072 put_thumb_insn (htab, output_bfd,
12073 elf32_arm_plt_thumb_stub[0], ptr - 4);
12074 put_thumb_insn (htab, output_bfd,
12075 elf32_arm_plt_thumb_stub[1], ptr - 2);
12078 put_arm_insn (htab, output_bfd,
12079 elf32_arm_plt_entry[0]
12080 | ((got_displacement & 0x0ff00000) >> 20),
12082 put_arm_insn (htab, output_bfd,
12083 elf32_arm_plt_entry[1]
12084 | ((got_displacement & 0x000ff000) >> 12),
12086 put_arm_insn (htab, output_bfd,
12087 elf32_arm_plt_entry[2]
12088 | (got_displacement & 0x00000fff),
12090 #ifdef FOUR_WORD_PLT
12091 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12095 /* Fill in the entry in the global offset table. */
12096 bfd_put_32 (output_bfd,
12097 (splt->output_section->vma
12098 + splt->output_offset),
12099 sgot->contents + got_offset);
12101 /* Fill in the entry in the .rel(a).plt section. */
12103 rel.r_offset = got_address;
12104 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12107 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12108 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12110 if (!h->def_regular)
12112 /* Mark the symbol as undefined, rather than as defined in
12113 the .plt section. Leave the value alone. */
12114 sym->st_shndx = SHN_UNDEF;
12115 /* If the symbol is weak, we do need to clear the value.
12116 Otherwise, the PLT entry would provide a definition for
12117 the symbol even if the symbol wasn't defined anywhere,
12118 and so the symbol would never be NULL. */
12119 if (!h->ref_regular_nonweak)
12124 if (h->got.offset != (bfd_vma) -1
12125 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12126 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12130 Elf_Internal_Rela rel;
12134 /* This symbol has an entry in the global offset table. Set it
12136 sgot = bfd_get_section_by_name (dynobj, ".got");
12137 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12138 BFD_ASSERT (sgot != NULL && srel != NULL);
12140 offset = (h->got.offset & ~(bfd_vma) 1);
12142 rel.r_offset = (sgot->output_section->vma
12143 + sgot->output_offset
12146 /* If this is a static link, or it is a -Bsymbolic link and the
12147 symbol is defined locally or was forced to be local because
12148 of a version file, we just want to emit a RELATIVE reloc.
12149 The entry in the global offset table will already have been
12150 initialized in the relocate_section function. */
12152 && SYMBOL_REFERENCES_LOCAL (info, h))
12154 BFD_ASSERT ((h->got.offset & 1) != 0);
12155 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12156 if (!htab->use_rel)
12158 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12159 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12164 BFD_ASSERT ((h->got.offset & 1) == 0);
12165 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12166 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12169 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12170 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12176 Elf_Internal_Rela rel;
12179 /* This symbol needs a copy reloc. Set it up. */
12180 BFD_ASSERT (h->dynindx != -1
12181 && (h->root.type == bfd_link_hash_defined
12182 || h->root.type == bfd_link_hash_defweak));
12184 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12185 RELOC_SECTION (htab, ".bss"));
12186 BFD_ASSERT (s != NULL);
12189 rel.r_offset = (h->root.u.def.value
12190 + h->root.u.def.section->output_section->vma
12191 + h->root.u.def.section->output_offset);
12192 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12193 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12194 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12197 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12198 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12199 to the ".got" section. */
12200 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12201 || (!htab->vxworks_p && h == htab->root.hgot))
12202 sym->st_shndx = SHN_ABS;
12207 /* Finish up the dynamic sections. */
12210 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12216 dynobj = elf_hash_table (info)->dynobj;
12218 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12219 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12220 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12222 if (elf_hash_table (info)->dynamic_sections_created)
12225 Elf32_External_Dyn *dyncon, *dynconend;
12226 struct elf32_arm_link_hash_table *htab;
12228 htab = elf32_arm_hash_table (info);
12229 splt = bfd_get_section_by_name (dynobj, ".plt");
12230 BFD_ASSERT (splt != NULL && sdyn != NULL);
12232 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12233 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12235 for (; dyncon < dynconend; dyncon++)
12237 Elf_Internal_Dyn dyn;
12241 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12248 if (htab->vxworks_p
12249 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12250 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12255 goto get_vma_if_bpabi;
12258 goto get_vma_if_bpabi;
12261 goto get_vma_if_bpabi;
12263 name = ".gnu.version";
12264 goto get_vma_if_bpabi;
12266 name = ".gnu.version_d";
12267 goto get_vma_if_bpabi;
12269 name = ".gnu.version_r";
12270 goto get_vma_if_bpabi;
12276 name = RELOC_SECTION (htab, ".plt");
12278 s = bfd_get_section_by_name (output_bfd, name);
12279 BFD_ASSERT (s != NULL);
12280 if (!htab->symbian_p)
12281 dyn.d_un.d_ptr = s->vma;
12283 /* In the BPABI, tags in the PT_DYNAMIC section point
12284 at the file offset, not the memory address, for the
12285 convenience of the post linker. */
12286 dyn.d_un.d_ptr = s->filepos;
12287 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12291 if (htab->symbian_p)
12296 s = bfd_get_section_by_name (output_bfd,
12297 RELOC_SECTION (htab, ".plt"));
12298 BFD_ASSERT (s != NULL);
12299 dyn.d_un.d_val = s->size;
12300 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12305 if (!htab->symbian_p)
12307 /* My reading of the SVR4 ABI indicates that the
12308 procedure linkage table relocs (DT_JMPREL) should be
12309 included in the overall relocs (DT_REL). This is
12310 what Solaris does. However, UnixWare can not handle
12311 that case. Therefore, we override the DT_RELSZ entry
12312 here to make it not include the JMPREL relocs. Since
12313 the linker script arranges for .rel(a).plt to follow all
12314 other relocation sections, we don't have to worry
12315 about changing the DT_REL entry. */
12316 s = bfd_get_section_by_name (output_bfd,
12317 RELOC_SECTION (htab, ".plt"));
12319 dyn.d_un.d_val -= s->size;
12320 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12323 /* Fall through. */
12327 /* In the BPABI, the DT_REL tag must point at the file
12328 offset, not the VMA, of the first relocation
12329 section. So, we use code similar to that in
12330 elflink.c, but do not check for SHF_ALLOC on the
12331 relcoation section, since relocations sections are
12332 never allocated under the BPABI. The comments above
12333 about Unixware notwithstanding, we include all of the
12334 relocations here. */
12335 if (htab->symbian_p)
12338 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12339 ? SHT_REL : SHT_RELA);
12340 dyn.d_un.d_val = 0;
12341 for (i = 1; i < elf_numsections (output_bfd); i++)
12343 Elf_Internal_Shdr *hdr
12344 = elf_elfsections (output_bfd)[i];
12345 if (hdr->sh_type == type)
12347 if (dyn.d_tag == DT_RELSZ
12348 || dyn.d_tag == DT_RELASZ)
12349 dyn.d_un.d_val += hdr->sh_size;
12350 else if ((ufile_ptr) hdr->sh_offset
12351 <= dyn.d_un.d_val - 1)
12352 dyn.d_un.d_val = hdr->sh_offset;
12355 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12359 /* Set the bottom bit of DT_INIT/FINI if the
12360 corresponding function is Thumb. */
12362 name = info->init_function;
12365 name = info->fini_function;
12367 /* If it wasn't set by elf_bfd_final_link
12368 then there is nothing to adjust. */
12369 if (dyn.d_un.d_val != 0)
12371 struct elf_link_hash_entry * eh;
12373 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12374 FALSE, FALSE, TRUE);
12376 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12378 dyn.d_un.d_val |= 1;
12379 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12386 /* Fill in the first entry in the procedure linkage table. */
12387 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12389 const bfd_vma *plt0_entry;
12390 bfd_vma got_address, plt_address, got_displacement;
12392 /* Calculate the addresses of the GOT and PLT. */
12393 got_address = sgot->output_section->vma + sgot->output_offset;
12394 plt_address = splt->output_section->vma + splt->output_offset;
12396 if (htab->vxworks_p)
12398 /* The VxWorks GOT is relocated by the dynamic linker.
12399 Therefore, we must emit relocations rather than simply
12400 computing the values now. */
12401 Elf_Internal_Rela rel;
12403 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12404 put_arm_insn (htab, output_bfd, plt0_entry[0],
12405 splt->contents + 0);
12406 put_arm_insn (htab, output_bfd, plt0_entry[1],
12407 splt->contents + 4);
12408 put_arm_insn (htab, output_bfd, plt0_entry[2],
12409 splt->contents + 8);
12410 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12412 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12413 rel.r_offset = plt_address + 12;
12414 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12416 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12417 htab->srelplt2->contents);
12421 got_displacement = got_address - (plt_address + 16);
12423 plt0_entry = elf32_arm_plt0_entry;
12424 put_arm_insn (htab, output_bfd, plt0_entry[0],
12425 splt->contents + 0);
12426 put_arm_insn (htab, output_bfd, plt0_entry[1],
12427 splt->contents + 4);
12428 put_arm_insn (htab, output_bfd, plt0_entry[2],
12429 splt->contents + 8);
12430 put_arm_insn (htab, output_bfd, plt0_entry[3],
12431 splt->contents + 12);
12433 #ifdef FOUR_WORD_PLT
12434 /* The displacement value goes in the otherwise-unused
12435 last word of the second entry. */
12436 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12438 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12443 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12444 really seem like the right value. */
12445 if (splt->output_section->owner == output_bfd)
12446 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12448 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12450 /* Correct the .rel(a).plt.unloaded relocations. They will have
12451 incorrect symbol indexes. */
12455 num_plts = ((htab->splt->size - htab->plt_header_size)
12456 / htab->plt_entry_size);
12457 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12459 for (; num_plts; num_plts--)
12461 Elf_Internal_Rela rel;
12463 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12464 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12465 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12466 p += RELOC_SIZE (htab);
12468 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12469 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12470 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12471 p += RELOC_SIZE (htab);
12476 /* Fill in the first three entries in the global offset table. */
12479 if (sgot->size > 0)
12482 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12484 bfd_put_32 (output_bfd,
12485 sdyn->output_section->vma + sdyn->output_offset,
12487 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12488 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12491 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12498 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12500 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12501 struct elf32_arm_link_hash_table *globals;
12503 i_ehdrp = elf_elfheader (abfd);
12505 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12506 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12508 i_ehdrp->e_ident[EI_OSABI] = 0;
12509 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12513 globals = elf32_arm_hash_table (link_info);
12514 if (globals->byteswap_code)
12515 i_ehdrp->e_flags |= EF_ARM_BE8;
12519 static enum elf_reloc_type_class
12520 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12522 switch ((int) ELF32_R_TYPE (rela->r_info))
12524 case R_ARM_RELATIVE:
12525 return reloc_class_relative;
12526 case R_ARM_JUMP_SLOT:
12527 return reloc_class_plt;
12529 return reloc_class_copy;
12531 return reloc_class_normal;
12535 /* Set the right machine number for an Arm ELF file. */
12538 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12540 if (hdr->sh_type == SHT_NOTE)
12541 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12547 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12549 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12552 /* Return TRUE if this is an unwinding table entry. */
12555 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12557 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12558 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12562 /* Set the type and flags for an ARM section. We do this by
12563 the section name, which is a hack, but ought to work. */
12566 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12570 name = bfd_get_section_name (abfd, sec);
12572 if (is_arm_elf_unwind_section_name (abfd, name))
12574 hdr->sh_type = SHT_ARM_EXIDX;
12575 hdr->sh_flags |= SHF_LINK_ORDER;
12580 /* Handle an ARM specific section when reading an object file. This is
12581 called when bfd_section_from_shdr finds a section with an unknown
12585 elf32_arm_section_from_shdr (bfd *abfd,
12586 Elf_Internal_Shdr * hdr,
12590 /* There ought to be a place to keep ELF backend specific flags, but
12591 at the moment there isn't one. We just keep track of the
12592 sections by their name, instead. Fortunately, the ABI gives
12593 names for all the ARM specific sections, so we will probably get
12595 switch (hdr->sh_type)
12597 case SHT_ARM_EXIDX:
12598 case SHT_ARM_PREEMPTMAP:
12599 case SHT_ARM_ATTRIBUTES:
12606 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12612 /* A structure used to record a list of sections, independently
12613 of the next and prev fields in the asection structure. */
12614 typedef struct section_list
12617 struct section_list * next;
12618 struct section_list * prev;
12622 /* Unfortunately we need to keep a list of sections for which
12623 an _arm_elf_section_data structure has been allocated. This
12624 is because it is possible for functions like elf32_arm_write_section
12625 to be called on a section which has had an elf_data_structure
12626 allocated for it (and so the used_by_bfd field is valid) but
12627 for which the ARM extended version of this structure - the
12628 _arm_elf_section_data structure - has not been allocated. */
12629 static section_list * sections_with_arm_elf_section_data = NULL;
12632 record_section_with_arm_elf_section_data (asection * sec)
12634 struct section_list * entry;
12636 entry = bfd_malloc (sizeof (* entry));
12640 entry->next = sections_with_arm_elf_section_data;
12641 entry->prev = NULL;
12642 if (entry->next != NULL)
12643 entry->next->prev = entry;
12644 sections_with_arm_elf_section_data = entry;
12647 static struct section_list *
12648 find_arm_elf_section_entry (asection * sec)
12650 struct section_list * entry;
12651 static struct section_list * last_entry = NULL;
12653 /* This is a short cut for the typical case where the sections are added
12654 to the sections_with_arm_elf_section_data list in forward order and
12655 then looked up here in backwards order. This makes a real difference
12656 to the ld-srec/sec64k.exp linker test. */
12657 entry = sections_with_arm_elf_section_data;
12658 if (last_entry != NULL)
12660 if (last_entry->sec == sec)
12661 entry = last_entry;
12662 else if (last_entry->next != NULL
12663 && last_entry->next->sec == sec)
12664 entry = last_entry->next;
12667 for (; entry; entry = entry->next)
12668 if (entry->sec == sec)
12672 /* Record the entry prior to this one - it is the entry we are most
12673 likely to want to locate next time. Also this way if we have been
12674 called from unrecord_section_with_arm_elf_section_data() we will not
12675 be caching a pointer that is about to be freed. */
12676 last_entry = entry->prev;
12681 static _arm_elf_section_data *
12682 get_arm_elf_section_data (asection * sec)
12684 struct section_list * entry;
12686 entry = find_arm_elf_section_entry (sec);
12689 return elf32_arm_section_data (entry->sec);
12695 unrecord_section_with_arm_elf_section_data (asection * sec)
12697 struct section_list * entry;
12699 entry = find_arm_elf_section_entry (sec);
12703 if (entry->prev != NULL)
12704 entry->prev->next = entry->next;
12705 if (entry->next != NULL)
12706 entry->next->prev = entry->prev;
12707 if (entry == sections_with_arm_elf_section_data)
12708 sections_with_arm_elf_section_data = entry->next;
12717 struct bfd_link_info *info;
12720 int (*func) (void *, const char *, Elf_Internal_Sym *,
12721 asection *, struct elf_link_hash_entry *);
12722 } output_arch_syminfo;
12724 enum map_symbol_type
12732 /* Output a single mapping symbol. */
12735 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12736 enum map_symbol_type type,
12739 static const char *names[3] = {"$a", "$t", "$d"};
12740 struct elf32_arm_link_hash_table *htab;
12741 Elf_Internal_Sym sym;
12743 htab = elf32_arm_hash_table (osi->info);
12744 sym.st_value = osi->sec->output_section->vma
12745 + osi->sec->output_offset
12749 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12750 sym.st_shndx = osi->sec_shndx;
12751 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12755 /* Output mapping symbols for PLT entries associated with H. */
12758 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12760 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12761 struct elf32_arm_link_hash_table *htab;
12762 struct elf32_arm_link_hash_entry *eh;
12765 htab = elf32_arm_hash_table (osi->info);
12767 if (h->root.type == bfd_link_hash_indirect)
12770 if (h->root.type == bfd_link_hash_warning)
12771 /* When warning symbols are created, they **replace** the "real"
12772 entry in the hash table, thus we never get to see the real
12773 symbol in a hash traversal. So look at it now. */
12774 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12776 if (h->plt.offset == (bfd_vma) -1)
12779 eh = (struct elf32_arm_link_hash_entry *) h;
12780 addr = h->plt.offset;
12781 if (htab->symbian_p)
12783 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12785 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12788 else if (htab->vxworks_p)
12790 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12792 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12794 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12796 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12801 bfd_signed_vma thumb_refs;
12803 thumb_refs = eh->plt_thumb_refcount;
12804 if (!htab->use_blx)
12805 thumb_refs += eh->plt_maybe_thumb_refcount;
12807 if (thumb_refs > 0)
12809 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12812 #ifdef FOUR_WORD_PLT
12813 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12815 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12818 /* A three-word PLT with no Thumb thunk contains only Arm code,
12819 so only need to output a mapping symbol for the first PLT entry and
12820 entries with thumb thunks. */
12821 if (thumb_refs > 0 || addr == 20)
12823 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12832 /* Output a single local symbol for a generated stub. */
12835 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12836 bfd_vma offset, bfd_vma size)
12838 struct elf32_arm_link_hash_table *htab;
12839 Elf_Internal_Sym sym;
12841 htab = elf32_arm_hash_table (osi->info);
12842 sym.st_value = osi->sec->output_section->vma
12843 + osi->sec->output_offset
12845 sym.st_size = size;
12847 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12848 sym.st_shndx = osi->sec_shndx;
12849 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12853 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12856 struct elf32_arm_stub_hash_entry *stub_entry;
12857 struct bfd_link_info *info;
12858 struct elf32_arm_link_hash_table *htab;
12859 asection *stub_sec;
12862 output_arch_syminfo *osi;
12863 const insn_sequence *template;
12864 enum stub_insn_type prev_type;
12867 enum map_symbol_type sym_type;
12869 /* Massage our args to the form they really have. */
12870 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12871 osi = (output_arch_syminfo *) in_arg;
12875 htab = elf32_arm_hash_table (info);
12876 stub_sec = stub_entry->stub_sec;
12878 /* Ensure this stub is attached to the current section being
12880 if (stub_sec != osi->sec)
12883 addr = (bfd_vma) stub_entry->stub_offset;
12884 stub_name = stub_entry->output_name;
12886 template = stub_entry->stub_template;
12887 switch (template[0].type)
12890 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12895 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12896 stub_entry->stub_size))
12904 prev_type = DATA_TYPE;
12906 for (i = 0; i < stub_entry->stub_template_size; i++)
12908 switch (template[i].type)
12911 sym_type = ARM_MAP_ARM;
12916 sym_type = ARM_MAP_THUMB;
12920 sym_type = ARM_MAP_DATA;
12928 if (template[i].type != prev_type)
12930 prev_type = template[i].type;
12931 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12935 switch (template[i].type)
12959 /* Output mapping symbols for linker generated sections. */
12962 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12963 struct bfd_link_info *info,
12965 int (*func) (void *, const char *,
12966 Elf_Internal_Sym *,
12968 struct elf_link_hash_entry *))
12970 output_arch_syminfo osi;
12971 struct elf32_arm_link_hash_table *htab;
12973 bfd_size_type size;
12975 htab = elf32_arm_hash_table (info);
12976 check_use_blx (htab);
12982 /* ARM->Thumb glue. */
12983 if (htab->arm_glue_size > 0)
12985 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12986 ARM2THUMB_GLUE_SECTION_NAME);
12988 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12989 (output_bfd, osi.sec->output_section);
12990 if (info->shared || htab->root.is_relocatable_executable
12991 || htab->pic_veneer)
12992 size = ARM2THUMB_PIC_GLUE_SIZE;
12993 else if (htab->use_blx)
12994 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
12996 size = ARM2THUMB_STATIC_GLUE_SIZE;
12998 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13000 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13001 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13005 /* Thumb->ARM glue. */
13006 if (htab->thumb_glue_size > 0)
13008 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13009 THUMB2ARM_GLUE_SECTION_NAME);
13011 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13012 (output_bfd, osi.sec->output_section);
13013 size = THUMB2ARM_GLUE_SIZE;
13015 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13017 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13018 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13022 /* ARMv4 BX veneers. */
13023 if (htab->bx_glue_size > 0)
13025 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13026 ARM_BX_GLUE_SECTION_NAME);
13028 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13029 (output_bfd, osi.sec->output_section);
13031 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13034 /* Long calls stubs. */
13035 if (htab->stub_bfd && htab->stub_bfd->sections)
13037 asection* stub_sec;
13039 for (stub_sec = htab->stub_bfd->sections;
13041 stub_sec = stub_sec->next)
13043 /* Ignore non-stub sections. */
13044 if (!strstr (stub_sec->name, STUB_SUFFIX))
13047 osi.sec = stub_sec;
13049 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13050 (output_bfd, osi.sec->output_section);
13052 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13056 /* Finally, output mapping symbols for the PLT. */
13057 if (!htab->splt || htab->splt->size == 0)
13060 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13061 htab->splt->output_section);
13062 osi.sec = htab->splt;
13063 /* Output mapping symbols for the plt header. SymbianOS does not have a
13065 if (htab->vxworks_p)
13067 /* VxWorks shared libraries have no PLT header. */
13070 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13072 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13076 else if (!htab->symbian_p)
13078 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13080 #ifndef FOUR_WORD_PLT
13081 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13086 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13090 /* Allocate target specific section data. */
13093 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13095 if (!sec->used_by_bfd)
13097 _arm_elf_section_data *sdata;
13098 bfd_size_type amt = sizeof (*sdata);
13100 sdata = bfd_zalloc (abfd, amt);
13103 sec->used_by_bfd = sdata;
13106 record_section_with_arm_elf_section_data (sec);
13108 return _bfd_elf_new_section_hook (abfd, sec);
13112 /* Used to order a list of mapping symbols by address. */
13115 elf32_arm_compare_mapping (const void * a, const void * b)
13117 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13118 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13120 if (amap->vma > bmap->vma)
13122 else if (amap->vma < bmap->vma)
13124 else if (amap->type > bmap->type)
13125 /* Ensure results do not depend on the host qsort for objects with
13126 multiple mapping symbols at the same address by sorting on type
13129 else if (amap->type < bmap->type)
13135 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13137 static unsigned long
13138 offset_prel31 (unsigned long addr, bfd_vma offset)
13140 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13143 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13147 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13149 unsigned long first_word = bfd_get_32 (output_bfd, from);
13150 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13152 /* High bit of first word is supposed to be zero. */
13153 if ((first_word & 0x80000000ul) == 0)
13154 first_word = offset_prel31 (first_word, offset);
13156 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
13157 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13158 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13159 second_word = offset_prel31 (second_word, offset);
13161 bfd_put_32 (output_bfd, first_word, to);
13162 bfd_put_32 (output_bfd, second_word, to + 4);
13165 /* Data for make_branch_to_a8_stub(). */
13167 struct a8_branch_to_stub_data {
13168 asection *writing_section;
13169 bfd_byte *contents;
13173 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13174 places for a particular section. */
13177 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13180 struct elf32_arm_stub_hash_entry *stub_entry;
13181 struct a8_branch_to_stub_data *data;
13182 bfd_byte *contents;
13183 unsigned long branch_insn;
13184 bfd_vma veneered_insn_loc, veneer_entry_loc;
13185 bfd_signed_vma branch_offset;
13187 unsigned int index;
13189 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13190 data = (struct a8_branch_to_stub_data *) in_arg;
13192 if (stub_entry->target_section != data->writing_section
13193 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13196 contents = data->contents;
13198 veneered_insn_loc = stub_entry->target_section->output_section->vma
13199 + stub_entry->target_section->output_offset
13200 + stub_entry->target_value;
13202 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13203 + stub_entry->stub_sec->output_offset
13204 + stub_entry->stub_offset;
13206 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13207 veneered_insn_loc &= ~3u;
13209 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13211 abfd = stub_entry->target_section->owner;
13212 index = stub_entry->target_value;
13214 /* We attempt to avoid this condition by setting stubs_always_after_branch
13215 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13216 This check is just to be on the safe side... */
13217 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13219 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13220 "allocated in unsafe location"), abfd);
13224 switch (stub_entry->stub_type)
13226 case arm_stub_a8_veneer_b:
13227 case arm_stub_a8_veneer_b_cond:
13228 branch_insn = 0xf0009000;
13231 case arm_stub_a8_veneer_blx:
13232 branch_insn = 0xf000e800;
13235 case arm_stub_a8_veneer_bl:
13237 unsigned int i1, j1, i2, j2, s;
13239 branch_insn = 0xf000d000;
13242 if (branch_offset < -16777216 || branch_offset > 16777214)
13244 /* There's not much we can do apart from complain if this
13246 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13247 "of range (input file too large)"), abfd);
13251 /* i1 = not(j1 eor s), so:
13253 j1 = (not i1) eor s. */
13255 branch_insn |= (branch_offset >> 1) & 0x7ff;
13256 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13257 i2 = (branch_offset >> 22) & 1;
13258 i1 = (branch_offset >> 23) & 1;
13259 s = (branch_offset >> 24) & 1;
13262 branch_insn |= j2 << 11;
13263 branch_insn |= j1 << 13;
13264 branch_insn |= s << 26;
13273 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13274 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13279 /* Do code byteswapping. Return FALSE afterwards so that the section is
13280 written out as normal. */
13283 elf32_arm_write_section (bfd *output_bfd,
13284 struct bfd_link_info *link_info,
13286 bfd_byte *contents)
13288 unsigned int mapcount, errcount;
13289 _arm_elf_section_data *arm_data;
13290 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13291 elf32_arm_section_map *map;
13292 elf32_vfp11_erratum_list *errnode;
13295 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13299 /* If this section has not been allocated an _arm_elf_section_data
13300 structure then we cannot record anything. */
13301 arm_data = get_arm_elf_section_data (sec);
13302 if (arm_data == NULL)
13305 mapcount = arm_data->mapcount;
13306 map = arm_data->map;
13307 errcount = arm_data->erratumcount;
13311 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13313 for (errnode = arm_data->erratumlist; errnode != 0;
13314 errnode = errnode->next)
13316 bfd_vma index = errnode->vma - offset;
13318 switch (errnode->type)
13320 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13322 bfd_vma branch_to_veneer;
13323 /* Original condition code of instruction, plus bit mask for
13324 ARM B instruction. */
13325 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13328 /* The instruction is before the label. */
13331 /* Above offset included in -4 below. */
13332 branch_to_veneer = errnode->u.b.veneer->vma
13333 - errnode->vma - 4;
13335 if ((signed) branch_to_veneer < -(1 << 25)
13336 || (signed) branch_to_veneer >= (1 << 25))
13337 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13338 "range"), output_bfd);
13340 insn |= (branch_to_veneer >> 2) & 0xffffff;
13341 contents[endianflip ^ index] = insn & 0xff;
13342 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13343 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13344 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13348 case VFP11_ERRATUM_ARM_VENEER:
13350 bfd_vma branch_from_veneer;
13353 /* Take size of veneer into account. */
13354 branch_from_veneer = errnode->u.v.branch->vma
13355 - errnode->vma - 12;
13357 if ((signed) branch_from_veneer < -(1 << 25)
13358 || (signed) branch_from_veneer >= (1 << 25))
13359 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13360 "range"), output_bfd);
13362 /* Original instruction. */
13363 insn = errnode->u.v.branch->u.b.vfp_insn;
13364 contents[endianflip ^ index] = insn & 0xff;
13365 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13366 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13367 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13369 /* Branch back to insn after original insn. */
13370 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13371 contents[endianflip ^ (index + 4)] = insn & 0xff;
13372 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13373 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13374 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13384 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13386 arm_unwind_table_edit *edit_node
13387 = arm_data->u.exidx.unwind_edit_list;
13388 /* Now, sec->size is the size of the section we will write. The original
13389 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13390 markers) was sec->rawsize. (This isn't the case if we perform no
13391 edits, then rawsize will be zero and we should use size). */
13392 bfd_byte *edited_contents = bfd_malloc (sec->size);
13393 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13394 unsigned int in_index, out_index;
13395 bfd_vma add_to_offsets = 0;
13397 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13401 unsigned int edit_index = edit_node->index;
13403 if (in_index < edit_index && in_index * 8 < input_size)
13405 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13406 contents + in_index * 8, add_to_offsets);
13410 else if (in_index == edit_index
13411 || (in_index * 8 >= input_size
13412 && edit_index == UINT_MAX))
13414 switch (edit_node->type)
13416 case DELETE_EXIDX_ENTRY:
13418 add_to_offsets += 8;
13421 case INSERT_EXIDX_CANTUNWIND_AT_END:
13423 asection *text_sec = edit_node->linked_section;
13424 bfd_vma text_offset = text_sec->output_section->vma
13425 + text_sec->output_offset
13427 bfd_vma exidx_offset = offset + out_index * 8;
13428 unsigned long prel31_offset;
13430 /* Note: this is meant to be equivalent to an
13431 R_ARM_PREL31 relocation. These synthetic
13432 EXIDX_CANTUNWIND markers are not relocated by the
13433 usual BFD method. */
13434 prel31_offset = (text_offset - exidx_offset)
13437 /* First address we can't unwind. */
13438 bfd_put_32 (output_bfd, prel31_offset,
13439 &edited_contents[out_index * 8]);
13441 /* Code for EXIDX_CANTUNWIND. */
13442 bfd_put_32 (output_bfd, 0x1,
13443 &edited_contents[out_index * 8 + 4]);
13446 add_to_offsets -= 8;
13451 edit_node = edit_node->next;
13456 /* No more edits, copy remaining entries verbatim. */
13457 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13458 contents + in_index * 8, add_to_offsets);
13464 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13465 bfd_set_section_contents (output_bfd, sec->output_section,
13467 (file_ptr) sec->output_offset, sec->size);
13472 /* Fix code to point to Cortex-A8 erratum stubs. */
13473 if (globals->fix_cortex_a8)
13475 struct a8_branch_to_stub_data data;
13477 data.writing_section = sec;
13478 data.contents = contents;
13480 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13487 if (globals->byteswap_code)
13489 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13492 for (i = 0; i < mapcount; i++)
13494 if (i == mapcount - 1)
13497 end = map[i + 1].vma;
13499 switch (map[i].type)
13502 /* Byte swap code words. */
13503 while (ptr + 3 < end)
13505 tmp = contents[ptr];
13506 contents[ptr] = contents[ptr + 3];
13507 contents[ptr + 3] = tmp;
13508 tmp = contents[ptr + 1];
13509 contents[ptr + 1] = contents[ptr + 2];
13510 contents[ptr + 2] = tmp;
13516 /* Byte swap code halfwords. */
13517 while (ptr + 1 < end)
13519 tmp = contents[ptr];
13520 contents[ptr] = contents[ptr + 1];
13521 contents[ptr + 1] = tmp;
13527 /* Leave data alone. */
13535 arm_data->mapcount = 0;
13536 arm_data->mapsize = 0;
13537 arm_data->map = NULL;
13538 unrecord_section_with_arm_elf_section_data (sec);
13544 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13546 void * ignore ATTRIBUTE_UNUSED)
13548 unrecord_section_with_arm_elf_section_data (sec);
13552 elf32_arm_close_and_cleanup (bfd * abfd)
13554 if (abfd->sections)
13555 bfd_map_over_sections (abfd,
13556 unrecord_section_via_map_over_sections,
13559 return _bfd_elf_close_and_cleanup (abfd);
13563 elf32_arm_bfd_free_cached_info (bfd * abfd)
13565 if (abfd->sections)
13566 bfd_map_over_sections (abfd,
13567 unrecord_section_via_map_over_sections,
13570 return _bfd_free_cached_info (abfd);
13573 /* Display STT_ARM_TFUNC symbols as functions. */
13576 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13579 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13581 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13582 elfsym->symbol.flags |= BSF_FUNCTION;
13586 /* Mangle thumb function symbols as we read them in. */
13589 elf32_arm_swap_symbol_in (bfd * abfd,
13592 Elf_Internal_Sym *dst)
13594 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13597 /* New EABI objects mark thumb function symbols by setting the low bit of
13598 the address. Turn these into STT_ARM_TFUNC. */
13599 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13600 && (dst->st_value & 1))
13602 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13603 dst->st_value &= ~(bfd_vma) 1;
13609 /* Mangle thumb function symbols as we write them out. */
13612 elf32_arm_swap_symbol_out (bfd *abfd,
13613 const Elf_Internal_Sym *src,
13617 Elf_Internal_Sym newsym;
13619 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13620 of the address set, as per the new EABI. We do this unconditionally
13621 because objcopy does not set the elf header flags until after
13622 it writes out the symbol table. */
13623 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13626 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13627 if (newsym.st_shndx != SHN_UNDEF)
13629 /* Do this only for defined symbols. At link type, the static
13630 linker will simulate the work of dynamic linker of resolving
13631 symbols and will carry over the thumbness of found symbols to
13632 the output symbol table. It's not clear how it happens, but
13633 the thumbness of undefined symbols can well be different at
13634 runtime, and writing '1' for them will be confusing for users
13635 and possibly for dynamic linker itself.
13637 newsym.st_value |= 1;
13642 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13645 /* Add the PT_ARM_EXIDX program header. */
13648 elf32_arm_modify_segment_map (bfd *abfd,
13649 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13651 struct elf_segment_map *m;
13654 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13655 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13657 /* If there is already a PT_ARM_EXIDX header, then we do not
13658 want to add another one. This situation arises when running
13659 "strip"; the input binary already has the header. */
13660 m = elf_tdata (abfd)->segment_map;
13661 while (m && m->p_type != PT_ARM_EXIDX)
13665 m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13668 m->p_type = PT_ARM_EXIDX;
13670 m->sections[0] = sec;
13672 m->next = elf_tdata (abfd)->segment_map;
13673 elf_tdata (abfd)->segment_map = m;
13680 /* We may add a PT_ARM_EXIDX program header. */
13683 elf32_arm_additional_program_headers (bfd *abfd,
13684 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13688 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13689 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13695 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13698 elf32_arm_is_function_type (unsigned int type)
13700 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13703 /* We use this to override swap_symbol_in and swap_symbol_out. */
13704 const struct elf_size_info elf32_arm_size_info =
13706 sizeof (Elf32_External_Ehdr),
13707 sizeof (Elf32_External_Phdr),
13708 sizeof (Elf32_External_Shdr),
13709 sizeof (Elf32_External_Rel),
13710 sizeof (Elf32_External_Rela),
13711 sizeof (Elf32_External_Sym),
13712 sizeof (Elf32_External_Dyn),
13713 sizeof (Elf_External_Note),
13717 ELFCLASS32, EV_CURRENT,
13718 bfd_elf32_write_out_phdrs,
13719 bfd_elf32_write_shdrs_and_ehdr,
13720 bfd_elf32_checksum_contents,
13721 bfd_elf32_write_relocs,
13722 elf32_arm_swap_symbol_in,
13723 elf32_arm_swap_symbol_out,
13724 bfd_elf32_slurp_reloc_table,
13725 bfd_elf32_slurp_symbol_table,
13726 bfd_elf32_swap_dyn_in,
13727 bfd_elf32_swap_dyn_out,
13728 bfd_elf32_swap_reloc_in,
13729 bfd_elf32_swap_reloc_out,
13730 bfd_elf32_swap_reloca_in,
13731 bfd_elf32_swap_reloca_out
13734 #define ELF_ARCH bfd_arch_arm
13735 #define ELF_MACHINE_CODE EM_ARM
13736 #ifdef __QNXTARGET__
13737 #define ELF_MAXPAGESIZE 0x1000
13739 #define ELF_MAXPAGESIZE 0x8000
13741 #define ELF_MINPAGESIZE 0x1000
13742 #define ELF_COMMONPAGESIZE 0x1000
13744 #define bfd_elf32_mkobject elf32_arm_mkobject
13746 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13747 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13748 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13749 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13750 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13751 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13752 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13753 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13754 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13755 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13756 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13757 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13758 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13759 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13760 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13762 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13763 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13764 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13765 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13766 #define elf_backend_check_relocs elf32_arm_check_relocs
13767 #define elf_backend_relocate_section elf32_arm_relocate_section
13768 #define elf_backend_write_section elf32_arm_write_section
13769 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13770 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13771 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13772 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13773 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13774 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13775 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13776 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13777 #define elf_backend_object_p elf32_arm_object_p
13778 #define elf_backend_section_flags elf32_arm_section_flags
13779 #define elf_backend_fake_sections elf32_arm_fake_sections
13780 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13781 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13782 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13783 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13784 #define elf_backend_size_info elf32_arm_size_info
13785 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13786 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13787 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13788 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13789 #define elf_backend_is_function_type elf32_arm_is_function_type
13791 #define elf_backend_can_refcount 1
13792 #define elf_backend_can_gc_sections 1
13793 #define elf_backend_plt_readonly 1
13794 #define elf_backend_want_got_plt 1
13795 #define elf_backend_want_plt_sym 0
13796 #define elf_backend_may_use_rel_p 1
13797 #define elf_backend_may_use_rela_p 0
13798 #define elf_backend_default_use_rela_p 0
13800 #define elf_backend_got_header_size 12
13802 #undef elf_backend_obj_attrs_vendor
13803 #define elf_backend_obj_attrs_vendor "aeabi"
13804 #undef elf_backend_obj_attrs_section
13805 #define elf_backend_obj_attrs_section ".ARM.attributes"
13806 #undef elf_backend_obj_attrs_arg_type
13807 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13808 #undef elf_backend_obj_attrs_section_type
13809 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13810 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13812 #include "elf32-target.h"
13814 /* VxWorks Targets. */
13816 #undef TARGET_LITTLE_SYM
13817 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13818 #undef TARGET_LITTLE_NAME
13819 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13820 #undef TARGET_BIG_SYM
13821 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13822 #undef TARGET_BIG_NAME
13823 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13825 /* Like elf32_arm_link_hash_table_create -- but overrides
13826 appropriately for VxWorks. */
13828 static struct bfd_link_hash_table *
13829 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13831 struct bfd_link_hash_table *ret;
13833 ret = elf32_arm_link_hash_table_create (abfd);
13836 struct elf32_arm_link_hash_table *htab
13837 = (struct elf32_arm_link_hash_table *) ret;
13839 htab->vxworks_p = 1;
13845 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13847 elf32_arm_final_write_processing (abfd, linker);
13848 elf_vxworks_final_write_processing (abfd, linker);
13852 #define elf32_bed elf32_arm_vxworks_bed
13854 #undef bfd_elf32_bfd_link_hash_table_create
13855 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13856 #undef elf_backend_add_symbol_hook
13857 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13858 #undef elf_backend_final_write_processing
13859 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13860 #undef elf_backend_emit_relocs
13861 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13863 #undef elf_backend_may_use_rel_p
13864 #define elf_backend_may_use_rel_p 0
13865 #undef elf_backend_may_use_rela_p
13866 #define elf_backend_may_use_rela_p 1
13867 #undef elf_backend_default_use_rela_p
13868 #define elf_backend_default_use_rela_p 1
13869 #undef elf_backend_want_plt_sym
13870 #define elf_backend_want_plt_sym 1
13871 #undef ELF_MAXPAGESIZE
13872 #define ELF_MAXPAGESIZE 0x1000
13874 #include "elf32-target.h"
13877 /* Symbian OS Targets. */
13879 #undef TARGET_LITTLE_SYM
13880 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
13881 #undef TARGET_LITTLE_NAME
13882 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
13883 #undef TARGET_BIG_SYM
13884 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
13885 #undef TARGET_BIG_NAME
13886 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
13888 /* Like elf32_arm_link_hash_table_create -- but overrides
13889 appropriately for Symbian OS. */
13891 static struct bfd_link_hash_table *
13892 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
13894 struct bfd_link_hash_table *ret;
13896 ret = elf32_arm_link_hash_table_create (abfd);
13899 struct elf32_arm_link_hash_table *htab
13900 = (struct elf32_arm_link_hash_table *)ret;
13901 /* There is no PLT header for Symbian OS. */
13902 htab->plt_header_size = 0;
13903 /* The PLT entries are each one instruction and one word. */
13904 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
13905 htab->symbian_p = 1;
13906 /* Symbian uses armv5t or above, so use_blx is always true. */
13908 htab->root.is_relocatable_executable = 1;
13913 static const struct bfd_elf_special_section
13914 elf32_arm_symbian_special_sections[] =
13916 /* In a BPABI executable, the dynamic linking sections do not go in
13917 the loadable read-only segment. The post-linker may wish to
13918 refer to these sections, but they are not part of the final
13920 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
13921 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
13922 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
13923 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
13924 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
13925 /* These sections do not need to be writable as the SymbianOS
13926 postlinker will arrange things so that no dynamic relocation is
13928 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
13929 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
13930 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
13931 { NULL, 0, 0, 0, 0 }
13935 elf32_arm_symbian_begin_write_processing (bfd *abfd,
13936 struct bfd_link_info *link_info)
13938 /* BPABI objects are never loaded directly by an OS kernel; they are
13939 processed by a postlinker first, into an OS-specific format. If
13940 the D_PAGED bit is set on the file, BFD will align segments on
13941 page boundaries, so that an OS can directly map the file. With
13942 BPABI objects, that just results in wasted space. In addition,
13943 because we clear the D_PAGED bit, map_sections_to_segments will
13944 recognize that the program headers should not be mapped into any
13945 loadable segment. */
13946 abfd->flags &= ~D_PAGED;
13947 elf32_arm_begin_write_processing (abfd, link_info);
13951 elf32_arm_symbian_modify_segment_map (bfd *abfd,
13952 struct bfd_link_info *info)
13954 struct elf_segment_map *m;
13957 /* BPABI shared libraries and executables should have a PT_DYNAMIC
13958 segment. However, because the .dynamic section is not marked
13959 with SEC_LOAD, the generic ELF code will not create such a
13961 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
13964 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
13965 if (m->p_type == PT_DYNAMIC)
13970 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
13971 m->next = elf_tdata (abfd)->segment_map;
13972 elf_tdata (abfd)->segment_map = m;
13976 /* Also call the generic arm routine. */
13977 return elf32_arm_modify_segment_map (abfd, info);
13980 /* Return address for Ith PLT stub in section PLT, for relocation REL
13981 or (bfd_vma) -1 if it should not be included. */
13984 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
13985 const arelent *rel ATTRIBUTE_UNUSED)
13987 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
13992 #define elf32_bed elf32_arm_symbian_bed
13994 /* The dynamic sections are not allocated on SymbianOS; the postlinker
13995 will process them and then discard them. */
13996 #undef ELF_DYNAMIC_SEC_FLAGS
13997 #define ELF_DYNAMIC_SEC_FLAGS \
13998 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14000 #undef elf_backend_add_symbol_hook
14001 #undef elf_backend_emit_relocs
14003 #undef bfd_elf32_bfd_link_hash_table_create
14004 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14005 #undef elf_backend_special_sections
14006 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14007 #undef elf_backend_begin_write_processing
14008 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14009 #undef elf_backend_final_write_processing
14010 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14012 #undef elf_backend_modify_segment_map
14013 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14015 /* There is no .got section for BPABI objects, and hence no header. */
14016 #undef elf_backend_got_header_size
14017 #define elf_backend_got_header_size 0
14019 /* Similarly, there is no .got.plt section. */
14020 #undef elf_backend_want_got_plt
14021 #define elf_backend_want_got_plt 0
14023 #undef elf_backend_plt_sym_val
14024 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14026 #undef elf_backend_may_use_rel_p
14027 #define elf_backend_may_use_rel_p 1
14028 #undef elf_backend_may_use_rela_p
14029 #define elf_backend_may_use_rela_p 0
14030 #undef elf_backend_default_use_rela_p
14031 #define elf_backend_default_use_rela_p 0
14032 #undef elf_backend_want_plt_sym
14033 #define elf_backend_want_plt_sym 0
14034 #undef ELF_MAXPAGESIZE
14035 #define ELF_MAXPAGESIZE 0x8000
14037 #include "elf32-target.h"