1 /* BFD back-end for Hitachi Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996 Free Software Foundation, Inc.
3 Contributed by Cygnus Support.
7 This file is part of BFD, the Binary File Descriptor library.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
29 #include "coff/internal.h"
32 /* Internal functions. */
33 static bfd_reloc_status_type sh_reloc
34 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
35 static long get_symbol_value PARAMS ((asymbol *));
36 static boolean sh_relax_section
37 PARAMS ((bfd *, asection *, struct bfd_link_info *, boolean *));
38 static boolean sh_relax_delete_bytes
39 PARAMS ((bfd *, asection *, bfd_vma, int));
40 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
41 static boolean sh_align_loads
42 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, boolean *));
43 static boolean sh_swap_insns
44 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, bfd_vma));
45 static boolean sh_relocate_section
46 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
47 struct internal_reloc *, struct internal_syment *, asection **));
48 static bfd_byte *sh_coff_get_relocated_section_contents
49 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
50 bfd_byte *, boolean, asymbol **));
52 /* Default section alignment to 2**2. */
53 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER (2)
55 /* Generate long file names. */
56 #define COFF_LONG_FILENAMES
58 /* The supported relocations. There are a lot of relocations defined
59 in coff/internal.h which we do not expect to ever see. */
60 static reloc_howto_type sh_coff_howtos[] =
65 { 3 }, /* R_SH_PCREL8 */
66 { 4 }, /* R_SH_PCREL16 */
67 { 5 }, /* R_SH_HIGH8 */
68 { 6 }, /* R_SH_IMM24 */
69 { 7 }, /* R_SH_LOW16 */
71 { 9 }, /* R_SH_PCDISP8BY4 */
73 HOWTO (R_SH_PCDISP8BY2, /* type */
75 1, /* size (0 = byte, 1 = short, 2 = long) */
77 true, /* pc_relative */
79 complain_overflow_signed, /* complain_on_overflow */
80 sh_reloc, /* special_function */
81 "r_pcdisp8by2", /* name */
82 true, /* partial_inplace */
85 true), /* pcrel_offset */
87 { 11 }, /* R_SH_PCDISP8 */
89 HOWTO (R_SH_PCDISP, /* type */
91 1, /* size (0 = byte, 1 = short, 2 = long) */
93 true, /* pc_relative */
95 complain_overflow_signed, /* complain_on_overflow */
96 sh_reloc, /* special_function */
97 "r_pcdisp12by2", /* name */
98 true, /* partial_inplace */
100 0xfff, /* dst_mask */
101 true), /* pcrel_offset */
105 HOWTO (R_SH_IMM32, /* type */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
109 false, /* pc_relative */
111 complain_overflow_bitfield, /* complain_on_overflow */
112 sh_reloc, /* special_function */
113 "r_imm32", /* name */
114 true, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 false), /* pcrel_offset */
120 { 16 }, /* R_SH_IMM8 */
121 { 17 }, /* R_SH_IMM8BY2 */
122 { 18 }, /* R_SH_IMM8BY4 */
123 { 19 }, /* R_SH_IMM4 */
124 { 20 }, /* R_SH_IMM4BY2 */
125 { 21 }, /* R_SH_IMM4BY4 */
127 HOWTO (R_SH_PCRELIMM8BY2, /* type */
129 1, /* size (0 = byte, 1 = short, 2 = long) */
131 true, /* pc_relative */
133 complain_overflow_unsigned, /* complain_on_overflow */
134 sh_reloc, /* special_function */
135 "r_pcrelimm8by2", /* name */
136 true, /* partial_inplace */
139 true), /* pcrel_offset */
141 HOWTO (R_SH_PCRELIMM8BY4, /* type */
143 1, /* size (0 = byte, 1 = short, 2 = long) */
145 true, /* pc_relative */
147 complain_overflow_unsigned, /* complain_on_overflow */
148 sh_reloc, /* special_function */
149 "r_pcrelimm8by4", /* name */
150 true, /* partial_inplace */
153 true), /* pcrel_offset */
155 HOWTO (R_SH_IMM16, /* type */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
159 false, /* pc_relative */
161 complain_overflow_bitfield, /* complain_on_overflow */
162 sh_reloc, /* special_function */
163 "r_imm16", /* name */
164 true, /* partial_inplace */
165 0xffff, /* src_mask */
166 0xffff, /* dst_mask */
167 false), /* pcrel_offset */
169 HOWTO (R_SH_SWITCH16, /* type */
171 1, /* size (0 = byte, 1 = short, 2 = long) */
173 false, /* pc_relative */
175 complain_overflow_bitfield, /* complain_on_overflow */
176 sh_reloc, /* special_function */
177 "r_switch16", /* name */
178 true, /* partial_inplace */
179 0xffff, /* src_mask */
180 0xffff, /* dst_mask */
181 false), /* pcrel_offset */
183 HOWTO (R_SH_SWITCH32, /* type */
185 2, /* size (0 = byte, 1 = short, 2 = long) */
187 false, /* pc_relative */
189 complain_overflow_bitfield, /* complain_on_overflow */
190 sh_reloc, /* special_function */
191 "r_switch32", /* name */
192 true, /* partial_inplace */
193 0xffffffff, /* src_mask */
194 0xffffffff, /* dst_mask */
195 false), /* pcrel_offset */
197 HOWTO (R_SH_USES, /* type */
199 1, /* size (0 = byte, 1 = short, 2 = long) */
201 false, /* pc_relative */
203 complain_overflow_bitfield, /* complain_on_overflow */
204 sh_reloc, /* special_function */
205 "r_uses", /* name */
206 true, /* partial_inplace */
207 0xffff, /* src_mask */
208 0xffff, /* dst_mask */
209 false), /* pcrel_offset */
211 HOWTO (R_SH_COUNT, /* type */
213 2, /* size (0 = byte, 1 = short, 2 = long) */
215 false, /* pc_relative */
217 complain_overflow_bitfield, /* complain_on_overflow */
218 sh_reloc, /* special_function */
219 "r_count", /* name */
220 true, /* partial_inplace */
221 0xffffffff, /* src_mask */
222 0xffffffff, /* dst_mask */
223 false), /* pcrel_offset */
225 HOWTO (R_SH_ALIGN, /* type */
227 2, /* size (0 = byte, 1 = short, 2 = long) */
229 false, /* pc_relative */
231 complain_overflow_bitfield, /* complain_on_overflow */
232 sh_reloc, /* special_function */
233 "r_align", /* name */
234 true, /* partial_inplace */
235 0xffffffff, /* src_mask */
236 0xffffffff, /* dst_mask */
237 false), /* pcrel_offset */
239 HOWTO (R_SH_CODE, /* type */
241 2, /* size (0 = byte, 1 = short, 2 = long) */
243 false, /* pc_relative */
245 complain_overflow_bitfield, /* complain_on_overflow */
246 sh_reloc, /* special_function */
247 "r_code", /* name */
248 true, /* partial_inplace */
249 0xffffffff, /* src_mask */
250 0xffffffff, /* dst_mask */
251 false), /* pcrel_offset */
253 HOWTO (R_SH_DATA, /* type */
255 2, /* size (0 = byte, 1 = short, 2 = long) */
257 false, /* pc_relative */
259 complain_overflow_bitfield, /* complain_on_overflow */
260 sh_reloc, /* special_function */
261 "r_data", /* name */
262 true, /* partial_inplace */
263 0xffffffff, /* src_mask */
264 0xffffffff, /* dst_mask */
265 false), /* pcrel_offset */
267 HOWTO (R_SH_LABEL, /* type */
269 2, /* size (0 = byte, 1 = short, 2 = long) */
271 false, /* pc_relative */
273 complain_overflow_bitfield, /* complain_on_overflow */
274 sh_reloc, /* special_function */
275 "r_label", /* name */
276 true, /* partial_inplace */
277 0xffffffff, /* src_mask */
278 0xffffffff, /* dst_mask */
279 false) /* pcrel_offset */
282 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
284 /* Check for a bad magic number. */
285 #define BADMAG(x) SHBADMAG(x)
287 /* Customize coffcode.h (this is not currently used). */
290 /* FIXME: This should not be set here. */
291 #define __A_MAGIC_SET__
293 /* Swap the r_offset field in and out. */
294 #define SWAP_IN_RELOC_OFFSET bfd_h_get_32
295 #define SWAP_OUT_RELOC_OFFSET bfd_h_put_32
297 /* Swap out extra information in the reloc structure. */
298 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
301 dst->r_stuff[0] = 'S'; \
302 dst->r_stuff[1] = 'C'; \
306 /* Get the value of a symbol, when performing a relocation. */
309 get_symbol_value (symbol)
314 if (bfd_is_com_section (symbol->section))
317 relocation = (symbol->value +
318 symbol->section->output_section->vma +
319 symbol->section->output_offset);
324 /* This macro is used in coffcode.h to get the howto corresponding to
325 an internal reloc. */
327 #define RTYPE2HOWTO(relent, internal) \
329 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
330 ? &sh_coff_howtos[(internal)->r_type] \
331 : (reloc_howto_type *) NULL))
333 /* This is the same as the macro in coffcode.h, except that it copies
334 r_offset into reloc_entry->addend for some relocs. */
335 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
337 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
338 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
339 coffsym = (obj_symbols (abfd) \
340 + (cache_ptr->sym_ptr_ptr - symbols)); \
342 coffsym = coff_symbol_from (abfd, ptr); \
343 if (coffsym != (coff_symbol_type *) NULL \
344 && coffsym->native->u.syment.n_scnum == 0) \
345 cache_ptr->addend = 0; \
346 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
347 && ptr->section != (asection *) NULL) \
348 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
350 cache_ptr->addend = 0; \
351 if ((reloc).r_type == R_SH_SWITCH16 \
352 || (reloc).r_type == R_SH_SWITCH32 \
353 || (reloc).r_type == R_SH_USES \
354 || (reloc).r_type == R_SH_COUNT \
355 || (reloc).r_type == R_SH_ALIGN) \
356 cache_ptr->addend = (reloc).r_offset; \
359 /* This is the howto function for the SH relocations. */
361 static bfd_reloc_status_type
362 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
365 arelent *reloc_entry;
368 asection *input_section;
370 char **error_message;
374 unsigned short r_type;
375 bfd_vma addr = reloc_entry->address;
376 bfd_byte *hit_data = addr + (bfd_byte *) data;
378 r_type = reloc_entry->howto->type;
380 if (output_bfd != NULL)
382 /* Partial linking--do nothing. */
383 reloc_entry->address += input_section->output_offset;
387 /* Almost all relocs have to do with relaxing. If any work must be
388 done for them, it has been done in sh_relax_section. */
389 if (r_type != R_SH_IMM32
390 && (r_type != R_SH_PCDISP
391 || (symbol_in->flags & BSF_LOCAL) != 0))
394 if (symbol_in != NULL
395 && bfd_is_und_section (symbol_in->section))
396 return bfd_reloc_undefined;
398 sym_value = get_symbol_value (symbol_in);
403 insn = bfd_get_32 (abfd, hit_data);
404 insn += sym_value + reloc_entry->addend;
405 bfd_put_32 (abfd, insn, hit_data);
408 insn = bfd_get_16 (abfd, hit_data);
409 sym_value += reloc_entry->addend;
410 sym_value -= (input_section->output_section->vma
411 + input_section->output_offset
414 sym_value += (insn & 0xfff) << 1;
417 insn = (insn & 0xf000) | (sym_value & 0xfff);
418 bfd_put_16 (abfd, insn, hit_data);
419 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
420 return bfd_reloc_overflow;
430 /* We can do relaxing. */
431 #define coff_bfd_relax_section sh_relax_section
433 /* We use the special COFF backend linker. */
434 #define coff_relocate_section sh_relocate_section
436 /* When relaxing, we need to use special code to get the relocated
437 section contents.  */
438 #define coff_bfd_get_relocated_section_contents \
439 sh_coff_get_relocated_section_contents
441 #include "coffcode.h"
443 /* This function handles relaxing on the SH.
445 Function calls on the SH look like this:
446 
447        mov.l  L1,r0
448        ...
449        jsr    @r0
450        ...
451      L1:
452        .long  function
453 
454 The compiler and assembler will cooperate to create R_SH_USES
455 relocs on the jsr instructions. The r_offset field of the
456 R_SH_USES reloc is the PC relative offset to the instruction which
457 loads the register (the r_offset field is computed as though it
458 were a jump instruction, so the offset value is actually from four
459 bytes past the instruction). The linker can use this reloc to
460 determine just which function is being called, and thus decide
461 whether it is possible to replace the jsr with a bsr.
463 If multiple function calls are all based on a single register load
464 (i.e., the same function is called multiple times), the compiler
465 guarantees that each function call will have an R_SH_USES reloc.
466 Therefore, if the linker is able to convert each R_SH_USES reloc
467 which refers to that address, it can safely eliminate the register
468 load.
470 When the assembler creates an R_SH_USES reloc, it examines it to
471 determine which address is being loaded (L1 in the above example).
472 It then counts the number of references to that address, and
473 creates an R_SH_COUNT reloc at that address. The r_offset field of
474 the R_SH_COUNT reloc will be the number of references. If the
475 linker is able to eliminate a register load, it can use the
476 R_SH_COUNT reloc to see whether it can also eliminate the function
477 address.
479 SH relaxing also handles another, unrelated, matter. On the SH, if
480 a load or store instruction is not aligned on a four byte boundary,
481 the memory cycle interferes with the 32 bit instruction fetch,
482 causing a one cycle bubble in the pipeline. Therefore, we try to
483 align load and store instructions on four byte boundaries if we
484 can, by swapping them with one of the adjacent instructions. */
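/* A minimal sketch (illustrative only, not part of the original
   sources; the function name is made up) of the reachability test
   which sh_relax_section applies before turning a jsr through a
   register into a pc-relative bsr.  The bsr displacement is a signed
   12 bit count of 2 byte units, measured from four bytes past the
   branch, so the code below only shortens calls whose byte offset
   lies in -0x1000..0xfff.  */
#if 0
static boolean
sh_bsr_reachable (symval, branch_vma)
     bfd_vma symval;
     bfd_vma branch_vma;
{
  bfd_signed_vma foff;

  foff = (bfd_signed_vma) (symval - (branch_vma + 4));
  if (foff < -0x1000 || foff >= 0x1000)
    return false;
  return true;
}
#endif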
487 sh_relax_section (abfd, sec, link_info, again)
490 struct bfd_link_info *link_info;
493 struct internal_reloc *internal_relocs;
494 struct internal_reloc *free_relocs = NULL;
496 struct internal_reloc *irel, *irelend;
497 bfd_byte *contents = NULL;
498 bfd_byte *free_contents = NULL;
502 if (link_info->relocateable
503 || (sec->flags & SEC_RELOC) == 0
504 || sec->reloc_count == 0)
507 /* If this is the first time we have been called for this section,
508 initialize the cooked size. */
509 if (sec->_cooked_size == 0)
510 sec->_cooked_size = sec->_raw_size;
512 internal_relocs = (_bfd_coff_read_internal_relocs
513 (abfd, sec, link_info->keep_memory,
514 (bfd_byte *) NULL, false,
515 (struct internal_reloc *) NULL));
516 if (internal_relocs == NULL)
518 if (! link_info->keep_memory)
519 free_relocs = internal_relocs;
523 irelend = internal_relocs + sec->reloc_count;
524 for (irel = internal_relocs; irel < irelend; irel++)
526 bfd_vma laddr, paddr, symval;
528 struct internal_reloc *irelfn, *irelscan, *irelcount;
529 struct internal_syment sym;
532 if (irel->r_type == R_SH_CODE)
535 if (irel->r_type != R_SH_USES)
538 /* Get the section contents. */
539 if (contents == NULL)
541 if (coff_section_data (abfd, sec) != NULL
542 && coff_section_data (abfd, sec)->contents != NULL)
543 contents = coff_section_data (abfd, sec)->contents;
546 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
547 if (contents == NULL)
549 free_contents = contents;
551 if (! bfd_get_section_contents (abfd, sec, contents,
552 (file_ptr) 0, sec->_raw_size))
557 /* The r_offset field of the R_SH_USES reloc will point us to
558 the register load. The 4 is because the r_offset field is
559 computed as though it were a jump offset, which is measured
560 from 4 bytes after the jump instruction. */
561 laddr = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
562 if (laddr >= sec->_raw_size)
564 (*_bfd_error_handler) ("%s: 0x%lx: warning: bad R_SH_USES offset",
565 bfd_get_filename (abfd),
566 (unsigned long) irel->r_vaddr);
569 insn = bfd_get_16 (abfd, contents + laddr);
571 /* If the instruction is not mov.l NN,rN, we don't know what to
572 do with it.  */
573 if ((insn & 0xf000) != 0xd000)
575 ((*_bfd_error_handler)
576 ("%s: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
577 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr, insn));
581 /* Get the address from which the register is being loaded. The
582 displacement in the mov.l instruction is quadrupled. It is a
583 displacement from four bytes after the mov.l instruction, but,
584 before adding in the PC address, two least significant bits
585 of the PC are cleared. We assume that the section is aligned
586 on a four byte boundary. */
589 paddr += (laddr + 4) &~ 3;
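/* Worked example (illustrative): for the mov.l instruction 0xd105 at
   section offset laddr = 0x1006, the 8 bit displacement is 5, which
   is quadrupled to 0x14; the base is (0x1006 + 4) & ~3 = 0x1008, so
   paddr = 0x14 + 0x1008 = 0x101c, the offset of the .long holding
   the function address.  */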
590 if (paddr >= sec->_raw_size)
592 ((*_bfd_error_handler)
593 ("%s: 0x%lx: warning: bad R_SH_USES load offset",
594 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
598 /* Get the reloc for the address from which the register is
599 being loaded. This reloc will tell us which function is
600 actually being called. */
602 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
603 if (irelfn->r_vaddr == paddr
604 && irelfn->r_type == R_SH_IMM32)
606 if (irelfn >= irelend)
608 ((*_bfd_error_handler)
609 ("%s: 0x%lx: warning: could not find expected reloc",
610 bfd_get_filename (abfd), (unsigned long) paddr));
614 /* Get the value of the symbol referred to by the reloc. */
615 if (! _bfd_coff_get_external_symbols (abfd))
617 bfd_coff_swap_sym_in (abfd,
618 ((bfd_byte *) obj_coff_external_syms (abfd)
620 * bfd_coff_symesz (abfd))),
622 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
624 ((*_bfd_error_handler)
625 ("%s: 0x%lx: warning: symbol in unexpected section",
626 bfd_get_filename (abfd), (unsigned long) paddr));
630 if (sym.n_sclass != C_EXT)
632 symval = (sym.n_value
634 + sec->output_section->vma
635 + sec->output_offset);
639 struct coff_link_hash_entry *h;
641 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
642 BFD_ASSERT (h != NULL);
643 if (h->root.type != bfd_link_hash_defined
644 && h->root.type != bfd_link_hash_defweak)
646 /* This appears to be a reference to an undefined
647 symbol. Just ignore it--it will be caught by the
648 regular reloc processing. */
652 symval = (h->root.u.def.value
653 + h->root.u.def.section->output_section->vma
654 + h->root.u.def.section->output_offset);
657 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
659 /* See if this function call can be shortened. */
663 + sec->output_section->vma
666 if (foff < -0x1000 || foff >= 0x1000)
668 /* After all that work, we can't shorten this function call. */
672 /* Shorten the function call. */
674 /* For simplicity of coding, we are going to modify the section
675 contents, the section relocs, and the BFD symbol table. We
676 must tell the rest of the code not to free up this
677 information. It would be possible to instead create a table
678 of changes which have to be made, as is done in coff-mips.c;
679 that would be more work, but would require less memory when
680 the linker is run. */
682 if (coff_section_data (abfd, sec) == NULL)
685 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
686 if (sec->used_by_bfd == NULL)
690 coff_section_data (abfd, sec)->relocs = internal_relocs;
691 coff_section_data (abfd, sec)->keep_relocs = true;
694 coff_section_data (abfd, sec)->contents = contents;
695 coff_section_data (abfd, sec)->keep_contents = true;
696 free_contents = NULL;
698 obj_coff_keep_syms (abfd) = true;
700 /* Replace the jsr with a bsr. */
702 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
703 replace the jsr with a bsr. */
704 irel->r_type = R_SH_PCDISP;
705 irel->r_symndx = irelfn->r_symndx;
706 if (sym.n_sclass != C_EXT)
708 /* If this needs to be changed because of future relaxing,
709 it will be handled here like other internal PCDISP
712 0xb000 | ((foff >> 1) & 0xfff),
713 contents + irel->r_vaddr - sec->vma);
717 /* We can't fully resolve this yet, because the external
718 symbol value may be changed by future relaxing. We let
719 the final link phase handle it. */
720 bfd_put_16 (abfd, 0xb000, contents + irel->r_vaddr - sec->vma);
723 /* See if there is another R_SH_USES reloc referring to the same
725 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
726 if (irelscan->r_type == R_SH_USES
727 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
729 if (irelscan < irelend)
731 /* Some other function call depends upon this register load,
732 and we have not yet converted that function call.
733 Indeed, we may never be able to convert it. There is
734 nothing else we can do at this point. */
738 /* Look for a R_SH_COUNT reloc on the location where the
739 function address is stored. Do this before deleting any
740 bytes, to avoid confusion about the address. */
741 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
742 if (irelcount->r_vaddr == paddr
743 && irelcount->r_type == R_SH_COUNT)
746 /* Delete the register load. */
747 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
750 /* That will change things, so, just in case it permits some
751 other function call to come within range, we should relax
752 again. Note that this is not required, and it may be slow. */
755 /* Now check whether we got a COUNT reloc. */
756 if (irelcount >= irelend)
758 ((*_bfd_error_handler)
759 ("%s: 0x%lx: warning: could not find expected COUNT reloc",
760 bfd_get_filename (abfd), (unsigned long) paddr));
764 /* The number of uses is stored in the r_offset field.  We've
765 just deleted one.  */
766 if (irelcount->r_offset == 0)
768 ((*_bfd_error_handler) ("%s: 0x%lx: warning: bad count",
769 bfd_get_filename (abfd),
770 (unsigned long) paddr));
774 --irelcount->r_offset;
776 /* If there are no more uses, we can delete the address. Reload
777 the address from irelfn, in case it was changed by the
778 previous call to sh_relax_delete_bytes. */
779 if (irelcount->r_offset == 0)
781 if (! sh_relax_delete_bytes (abfd, sec,
782 irelfn->r_vaddr - sec->vma, 4))
786 /* We've done all we can with that function call. */
789 /* Look for load and store instructions that we can align on four
790 byte boundaries.  */
795 /* Get the section contents. */
796 if (contents == NULL)
798 if (coff_section_data (abfd, sec) != NULL
799 && coff_section_data (abfd, sec)->contents != NULL)
800 contents = coff_section_data (abfd, sec)->contents;
803 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
804 if (contents == NULL)
806 free_contents = contents;
808 if (! bfd_get_section_contents (abfd, sec, contents,
809 (file_ptr) 0, sec->_raw_size))
814 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
819 if (coff_section_data (abfd, sec) == NULL)
822 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
823 if (sec->used_by_bfd == NULL)
827 coff_section_data (abfd, sec)->relocs = internal_relocs;
828 coff_section_data (abfd, sec)->keep_relocs = true;
831 coff_section_data (abfd, sec)->contents = contents;
832 coff_section_data (abfd, sec)->keep_contents = true;
833 free_contents = NULL;
835 obj_coff_keep_syms (abfd) = true;
839 if (free_relocs != NULL)
845 if (free_contents != NULL)
847 if (! link_info->keep_memory)
848 free (free_contents);
851 /* Cache the section contents for coff_link_input_bfd. */
852 if (coff_section_data (abfd, sec) == NULL)
855 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
856 if (sec->used_by_bfd == NULL)
858 coff_section_data (abfd, sec)->relocs = NULL;
860 coff_section_data (abfd, sec)->contents = contents;
867 if (free_relocs != NULL)
869 if (free_contents != NULL)
870 free (free_contents);
874 /* Delete some bytes from a section while relaxing. */
877 sh_relax_delete_bytes (abfd, sec, addr, count)
884 struct internal_reloc *irel, *irelend;
885 struct internal_reloc *irelalign;
887 bfd_byte *esym, *esymend;
888 bfd_size_type symesz;
889 struct coff_link_hash_entry **sym_hash;
892 contents = coff_section_data (abfd, sec)->contents;
894 /* The deletion must stop at the next ALIGN reloc for an alignment
895 power larger than the number of bytes we are deleting. */
898 toaddr = sec->_cooked_size;
900 irel = coff_section_data (abfd, sec)->relocs;
901 irelend = irel + sec->reloc_count;
902 for (; irel < irelend; irel++)
904 if (irel->r_type == R_SH_ALIGN
905 && irel->r_vaddr - sec->vma > addr
906 && count < (1 << irel->r_offset))
909 toaddr = irel->r_vaddr - sec->vma;
914 /* Actually delete the bytes. */
915 memmove (contents + addr, contents + addr + count, toaddr - addr - count);
916 if (irelalign == NULL)
917 sec->_cooked_size -= count;
922 #define NOP_OPCODE (0x0009)
924 BFD_ASSERT ((count & 1) == 0);
925 for (i = 0; i < count; i += 2)
926 bfd_put_16 (abfd, NOP_OPCODE, contents + toaddr - count + i);
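/* Worked example (illustrative): deleting count = 2 bytes when an
   R_SH_ALIGN reloc with r_offset 2 (four byte alignment) follows at
   offset 0x20 stops the deletion there, because 2 < (1 << 2).  The
   bytes between addr + 2 and 0x20 move down by two, the two bytes
   just below 0x20 are filled with a NOP, and the section size is
   unchanged, so everything at and after 0x20 keeps its alignment.  */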
929 /* Adjust all the relocs. */
930 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
932 bfd_vma nraddr, start, stop;
934 struct internal_syment sym;
935 int off, adjust, oinsn;
939 /* Get the new reloc address. */
940 nraddr = irel->r_vaddr - sec->vma;
941 if ((irel->r_vaddr - sec->vma > addr
942 && irel->r_vaddr - sec->vma < toaddr)
943 || (irel->r_type == R_SH_ALIGN
944 && irel->r_vaddr - sec->vma == toaddr))
947 /* See if this reloc was for the bytes we have deleted, in which
948 case we no longer care about it. Don't delete relocs which
949 represent addresses, though. */
950 if (irel->r_vaddr - sec->vma >= addr
951 && irel->r_vaddr - sec->vma < addr + count
952 && irel->r_type != R_SH_ALIGN
953 && irel->r_type != R_SH_CODE
954 && irel->r_type != R_SH_DATA)
955 irel->r_type = R_SH_UNUSED;
957 /* If this is a PC relative reloc, see if the range it covers
958 includes the bytes we have deleted. */
959 switch (irel->r_type)
964 case R_SH_PCDISP8BY2:
966 case R_SH_PCRELIMM8BY2:
967 case R_SH_PCRELIMM8BY4:
968 start = irel->r_vaddr - sec->vma;
969 insn = bfd_get_16 (abfd, contents + nraddr);
973 switch (irel->r_type)
980 /* If this reloc is against a symbol defined in this
981 section, and the symbol will not be adjusted below, we
982 must check the addend to see whether it will put the value in
983 range to be adjusted, and hence must be changed. */
984 bfd_coff_swap_sym_in (abfd,
985 ((bfd_byte *) obj_coff_external_syms (abfd)
987 * bfd_coff_symesz (abfd))),
989 if (sym.n_sclass != C_EXT
990 && sym.n_scnum == sec->target_index
991 && ((bfd_vma) sym.n_value <= addr
992 || (bfd_vma) sym.n_value >= toaddr))
996 val = bfd_get_32 (abfd, contents + nraddr);
998 if (val >= addr && val < toaddr)
999 bfd_put_32 (abfd, val - count, contents + nraddr);
1001 start = stop = addr;
1004 case R_SH_PCDISP8BY2:
1008 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1012 bfd_coff_swap_sym_in (abfd,
1013 ((bfd_byte *) obj_coff_external_syms (abfd)
1015 * bfd_coff_symesz (abfd))),
1017 if (sym.n_sclass == C_EXT)
1018 start = stop = addr;
1024 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1028 case R_SH_PCRELIMM8BY2:
1030 stop = start + 4 + off * 2;
1033 case R_SH_PCRELIMM8BY4:
1035 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1040 /* These reloc types represent
1041 .word L2-L1
1042 The r_offset field holds the difference between the reloc
1043 address and L1. That is the start of the reloc, and
1044 adding in the contents gives us the top. We must adjust
1045 both the r_offset field and the section contents. */
1047 start = irel->r_vaddr - sec->vma;
1048 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1052 && (stop <= addr || stop >= toaddr))
1053 irel->r_offset += count;
1054 else if (stop > addr
1056 && (start <= addr || start >= toaddr))
1057 irel->r_offset -= count;
1061 if (irel->r_type == R_SH_SWITCH16)
1062 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1064 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1065 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1070 start = irel->r_vaddr - sec->vma;
1071 stop = (bfd_vma) ((bfd_signed_vma) start
1072 + (long) irel->r_offset
1079 && (stop <= addr || stop >= toaddr))
1081 else if (stop > addr
1083 && (start <= addr || start >= toaddr))
1092 switch (irel->r_type)
1098 case R_SH_PCDISP8BY2:
1099 case R_SH_PCRELIMM8BY2:
1101 if ((oinsn & 0xff00) != (insn & 0xff00))
1103 bfd_put_16 (abfd, insn, contents + nraddr);
1108 if ((oinsn & 0xf000) != (insn & 0xf000))
1110 bfd_put_16 (abfd, insn, contents + nraddr);
1113 case R_SH_PCRELIMM8BY4:
1114 BFD_ASSERT (adjust == count || count >= 4);
1119 if ((irel->r_vaddr & 3) == 0)
1122 if ((oinsn & 0xff00) != (insn & 0xff00))
1124 bfd_put_16 (abfd, insn, contents + nraddr);
1129 if (voff < - 0x8000 || voff >= 0x8000)
1131 bfd_put_signed_16 (abfd, voff, contents + nraddr);
1136 bfd_put_signed_32 (abfd, voff, contents + nraddr);
1140 irel->r_offset += adjust;
1146 ((*_bfd_error_handler)
1147 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
1148 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
1149 bfd_set_error (bfd_error_bad_value);
1154 irel->r_vaddr = nraddr + sec->vma;
1157 /* Look through all the other sections.  If they contain any IMM32
1158 relocs against internal symbols which we are not going to adjust
1159 below, we may need to adjust the addends. */
1160 for (o = abfd->sections; o != NULL; o = o->next)
1162 struct internal_reloc *internal_relocs;
1163 struct internal_reloc *irelscan, *irelscanend;
1164 bfd_byte *ocontents;
1167 || (o->flags & SEC_RELOC) == 0
1168 || o->reloc_count == 0)
1171 /* We always cache the relocs. Perhaps, if info->keep_memory is
1172 false, we should free them, if we are permitted to, when we
1173 leave sh_coff_relax_section. */
1174 internal_relocs = (_bfd_coff_read_internal_relocs
1175 (abfd, o, true, (bfd_byte *) NULL, false,
1176 (struct internal_reloc *) NULL));
1177 if (internal_relocs == NULL)
1181 irelscanend = internal_relocs + o->reloc_count;
1182 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1184 struct internal_syment sym;
1186 if (irelscan->r_type != R_SH_IMM32)
1189 bfd_coff_swap_sym_in (abfd,
1190 ((bfd_byte *) obj_coff_external_syms (abfd)
1191 + (irelscan->r_symndx
1192 * bfd_coff_symesz (abfd))),
1194 if (sym.n_sclass != C_EXT
1195 && sym.n_scnum == sec->target_index
1196 && ((bfd_vma) sym.n_value <= addr
1197 || (bfd_vma) sym.n_value >= toaddr))
1201 if (ocontents == NULL)
1203 if (coff_section_data (abfd, o)->contents != NULL)
1204 ocontents = coff_section_data (abfd, o)->contents;
1207 /* We always cache the section contents.
1208 Perhaps, if info->keep_memory is false, we
1209 should free them, if we are permitted to,
1210 when we leave sh_coff_relax_section. */
1211 ocontents = (bfd_byte *) bfd_malloc (o->_raw_size);
1212 if (ocontents == NULL)
1214 if (! bfd_get_section_contents (abfd, o, ocontents,
1218 coff_section_data (abfd, o)->contents = ocontents;
1222 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1224 if (val >= addr && val < toaddr)
1225 bfd_put_32 (abfd, val - count,
1226 ocontents + irelscan->r_vaddr - o->vma);
1228 coff_section_data (abfd, o)->keep_contents = true;
1233 /* Adjusting the internal symbols will not work if something has
1234 already retrieved the generic symbols. It would be possible to
1235 make this work by adjusting the generic symbols at the same time.
1236 However, this case should not arise in normal usage. */
1237 if (obj_symbols (abfd) != NULL
1238 || obj_raw_syments (abfd) != NULL)
1240 ((*_bfd_error_handler)
1241 ("%s: fatal: generic symbols retrieved before relaxing",
1242 bfd_get_filename (abfd)));
1243 bfd_set_error (bfd_error_invalid_operation);
1247 /* Adjust all the symbols. */
1248 sym_hash = obj_coff_sym_hashes (abfd);
1249 symesz = bfd_coff_symesz (abfd);
1250 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1251 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1252 while (esym < esymend)
1254 struct internal_syment isym;
1256 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1258 if (isym.n_scnum == sec->target_index
1259 && (bfd_vma) isym.n_value > addr
1260 && (bfd_vma) isym.n_value < toaddr)
1262 isym.n_value -= count;
1264 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1266 if (*sym_hash != NULL)
1268 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1269 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1270 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1271 && (*sym_hash)->root.u.def.value < toaddr);
1272 (*sym_hash)->root.u.def.value -= count;
1276 esym += (isym.n_numaux + 1) * symesz;
1277 sym_hash += isym.n_numaux + 1;
1280 /* See if we can move the ALIGN reloc forward. We have adjusted
1281 r_vaddr for it already. */
1282 if (irelalign != NULL)
1286 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1287 1 << irelalign->r_offset);
1288 if (alignaddr != toaddr)
1290 /* Tail recursion. */
1291 return sh_relax_delete_bytes (abfd, sec,
1292 irelalign->r_vaddr - sec->vma,
1293 1 << irelalign->r_offset);
1300 /* This is yet another version of the SH opcode table, used to rapidly
1301 get information about a particular instruction. */
1303 /* The opcode map is represented by an array of these structures. The
1304 array is indexed by the high order four bits in the instruction. */
1306 struct sh_major_opcode
1308 /* A pointer to the instruction list. This is an array which
1309 contains all the instructions with this major opcode. */
1310 const struct sh_minor_opcode *minor_opcodes;
1311 /* The number of elements in minor_opcodes. */
1312 unsigned short count;
1315 /* This structure holds information for a set of SH opcodes. The
1316 instruction code is anded with the mask value, and the resulting
1317 value is used to search the sorted opcode list.  */
1319 struct sh_minor_opcode
1321 /* The sorted opcode list. */
1322 const struct sh_opcode *opcodes;
1323 /* The number of elements in opcodes. */
1324 unsigned short count;
1325 /* The mask value to use when searching the opcode list. */
1326 unsigned short mask;
1329 /* This structure holds information for an SH instruction. An array
1330 of these structures is sorted in order by opcode. */
1334 /* The code for this instruction, after it has been anded with the
1335 mask value in the sh_minor_opcode structure.  */
1336 unsigned short opcode;
1337 /* Flags for this instruction. */
1338 unsigned short flags;
1341 /* Flags which appear in the sh_opcode structure.  */
1343 /* This instruction loads a value from memory. */
1346 /* This instruction stores a value to memory. */
1349 /* This instruction is a branch. */
1350 #define BRANCH (0x4)
1352 /* This instruction has a delay slot. */
1355 /* This instruction uses the value in the register in the field at
1356 mask 0x0f00 of the instruction. */
1357 #define USES1 (0x10)
1359 /* This instruction uses the value in the register in the field at
1360 mask 0x00f0 of the instruction. */
1361 #define USES2 (0x20)
1363 /* This instruction uses the value in register 0. */
1364 #define USESR0 (0x40)
1366 /* This instruction sets the value in the register in the field at
1367 mask 0x0f00 of the instruction. */
1368 #define SETS1 (0x80)
1370 /* This instruction sets the value in the register in the field at
1371 mask 0x00f0 of the instruction. */
1372 #define SETS2 (0x100)
1374 /* This instruction sets register 0. */
1375 #define SETSR0 (0x200)
1377 /* This instruction sets a special register. */
1378 #define SETSSP (0x400)
1380 /* This instruction uses a special register. */
1381 #define USESSP (0x800)
1383 /* This instruction uses the floating point register in the field at
1384 mask 0x0f00 of the instruction. */
1385 #define USESF1 (0x1000)
1387 /* This instruction uses the floating point register in the field at
1388 mask 0x00f0 of the instruction. */
1389 #define USESF2 (0x2000)
1391 /* This instruction uses floating point register 0. */
1392 #define USESF0 (0x4000)
1394 /* This instruction sets the floating point register in the field at
1395 mask 0x0f00 of the instruction. */
1396 #define SETSF1 (0x8000)
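/* Illustrative only (not part of the original sources; the function
   name is made up): how the tables below fit together.  For the
   instruction 0x400b (jsr @r0), sh_insn_info indexes sh_opcodes by
   the high four bits (4), masks the instruction with each minor
   opcode mask (0xf0ff here, leaving 0x400b), and finds an entry
   whose flags are BRANCH | DELAY | USES1.  */
#if 0
static void
sh_opcode_table_example ()
{
  const struct sh_opcode *op;

  op = sh_insn_info (0x400b);	/* jsr @r0 */
  BFD_ASSERT (op != NULL && op->flags == (BRANCH | DELAY | USES1));
}
#endif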
1398 static boolean sh_insn_uses_reg
1399 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1400 static boolean sh_insn_uses_freg
1401 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1402 static boolean sh_insns_conflict
1403 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1404 const struct sh_opcode *));
1405 static boolean sh_load_use
1406 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1407 const struct sh_opcode *));
1409 /* The opcode maps. */
1411 #define MAP(a) a, sizeof a / sizeof a[0]
1413 static const struct sh_opcode sh_opcode00[] =
1415 { 0x0008, SETSSP }, /* clrt */
1416 { 0x0009, 0 }, /* nop */
1417 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1418 { 0x0018, SETSSP }, /* sett */
1419 { 0x0019, SETSSP }, /* div0u */
1420 { 0x001b, 0 }, /* sleep */
1421 { 0x0028, SETSSP }, /* clrmac */
1422 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1423 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1424 { 0x0048, SETSSP }, /* clrs */
1425 { 0x0058, SETSSP } /* sets */
1428 static const struct sh_opcode sh_opcode01[] =
1430 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1431 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1432 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1433 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1434 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1435 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1436 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1437 { 0x0029, SETS1 | USESSP }, /* movt rn */
1438 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1439 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1440 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1441 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1442 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn */
1443 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1444 { 0x0083, LOAD | USES1 }, /* pref @rn */
1445 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1446 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1447 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1448 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1449 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1450 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1451 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1454 static const struct sh_opcode sh_opcode02[] =
1456 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1457 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1458 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1459 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1460 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1461 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1462 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1463 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1466 static const struct sh_minor_opcode sh_opcode0[] =
1468 { MAP (sh_opcode00), 0xffff },
1469 { MAP (sh_opcode01), 0xf0ff },
1470 { MAP (sh_opcode02), 0xf00f }
1473 static const struct sh_opcode sh_opcode10[] =
1475 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1478 static const struct sh_minor_opcode sh_opcode1[] =
1480 { MAP (sh_opcode10), 0xf000 }
1483 static const struct sh_opcode sh_opcode20[] =
1485 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1486 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1487 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1488 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1489 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1490 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1491 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1492 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1493 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1494 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1495 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1496 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1497 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1498 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1499 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1502 static const struct sh_minor_opcode sh_opcode2[] =
1504 { MAP (sh_opcode20), 0xf00f }
1507 static const struct sh_opcode sh_opcode30[] =
1509 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1510 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1511 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1512 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1513 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1514 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1515 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1516 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1517 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1518 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1519 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1520 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1521 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1522 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1525 static const struct sh_minor_opcode sh_opcode3[] =
1527 { MAP (sh_opcode30), 0xf00f }
1530 static const struct sh_opcode sh_opcode40[] =
1532 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1533 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1534 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1535 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1536 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1537 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1538 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1539 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1540 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1541 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1542 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1543 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1544 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1545 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1546 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1547 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1548 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1549 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1550 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1551 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1552 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1553 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1554 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1555 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1556 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1557 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1558 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1559 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1560 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1561 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1562 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1563 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1564 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1565 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1566 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1567 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1568 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1569 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1570 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1571 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1572 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1573 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1574 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1575 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1576 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1577 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1578 { 0x405a, SETSSP | USES1 }, /* lds.l rm,fpul */
1579 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr,@-rn */
1580 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr */
1581 { 0x406a, SETSSP | USES1 } /* lds rm,fpscr */
1584 static const struct sh_opcode sh_opcode41[] =
1586 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l rx_bank,@-rn */
1587 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rx_bank */
1588 { 0x408e, SETSSP | USES1 } /* ldc rm,rx_bank */
1591 static const struct sh_opcode sh_opcode42[] =
1593 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1594 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1595 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1598 static const struct sh_minor_opcode sh_opcode4[] =
1600 { MAP (sh_opcode40), 0xf0ff },
1601 { MAP (sh_opcode41), 0xf08f },
1602 { MAP (sh_opcode42), 0xf00f }
1605 static const struct sh_opcode sh_opcode50[] =
1607 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1610 static const struct sh_minor_opcode sh_opcode5[] =
1612 { MAP (sh_opcode50), 0xf000 }
1615 static const struct sh_opcode sh_opcode60[] =
1617 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1618 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1619 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1620 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1621 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1622 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1623 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1624 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1625 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1626 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1627 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1628 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1629 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1630 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1631 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1632 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1635 static const struct sh_minor_opcode sh_opcode6[] =
1637 { MAP (sh_opcode60), 0xf00f }
1640 static const struct sh_opcode sh_opcode70[] =
1642 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1645 static const struct sh_minor_opcode sh_opcode7[] =
1647 { MAP (sh_opcode70), 0xf000 }
1650 static const struct sh_opcode sh_opcode80[] =
1652 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1653 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1654 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1655 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rn),r0 */
1656 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1657 { 0x8900, BRANCH | USESSP }, /* bt label */
1658 { 0x8b00, BRANCH | USESSP }, /* bf label */
1659 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1660 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1663 static const struct sh_minor_opcode sh_opcode8[] =
1665 { MAP (sh_opcode80), 0xff00 }
1668 static const struct sh_opcode sh_opcode90[] =
1670 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1673 static const struct sh_minor_opcode sh_opcode9[] =
1675 { MAP (sh_opcode90), 0xf000 }
1678 static const struct sh_opcode sh_opcodea0[] =
1680 { 0xa000, BRANCH | DELAY } /* bra label */
1683 static const struct sh_minor_opcode sh_opcodea[] =
1685 { MAP (sh_opcodea0), 0xf000 }
1688 static const struct sh_opcode sh_opcodeb0[] =
1690 { 0xb000, BRANCH | DELAY } /* bsr label */
1693 static const struct sh_minor_opcode sh_opcodeb[] =
1695 { MAP (sh_opcodeb0), 0xf000 }
1698 static const struct sh_opcode sh_opcodec0[] =
1700 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1701 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1702 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1703 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1704 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1705 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1706 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1707 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1708 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1709 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1710 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1711 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1712 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1713 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1714 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1715 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1718 static const struct sh_minor_opcode sh_opcodec[] =
1720 { MAP (sh_opcodec0), 0xff00 }
1723 static const struct sh_opcode sh_opcoded0[] =
1725 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1728 static const struct sh_minor_opcode sh_opcoded[] =
1730 { MAP (sh_opcoded0), 0xf000 }
1733 static const struct sh_opcode sh_opcodee0[] =
1735 { 0xe000, SETS1 } /* mov #imm,rn */
1738 static const struct sh_minor_opcode sh_opcodee[] =
1740 { MAP (sh_opcodee0), 0xf000 }
1743 static const struct sh_opcode sh_opcodef0[] =
1745 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
1746 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
1747 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
1748 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
1749 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
1750 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
1751 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
1752 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
1753 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
1754 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
1755 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
1756 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
1757 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
1758 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
1761 static const struct sh_opcode sh_opcodef1[] =
1763 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
1764 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
1765 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
1766 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
1767 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
1768 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
1769 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
1770 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
1771 { 0xf08d, SETSF1 }, /* fldi0 fn */
1772 { 0xf09d, SETSF1 } /* fldi1 fn */
1775 static const struct sh_minor_opcode sh_opcodef[] =
1777 { MAP (sh_opcodef0), 0xf00f },
1778 { MAP (sh_opcodef1), 0xf0ff }
1781 static const struct sh_major_opcode sh_opcodes[] =
1783 { MAP (sh_opcode0) },
1784 { MAP (sh_opcode1) },
1785 { MAP (sh_opcode2) },
1786 { MAP (sh_opcode3) },
1787 { MAP (sh_opcode4) },
1788 { MAP (sh_opcode5) },
1789 { MAP (sh_opcode6) },
1790 { MAP (sh_opcode7) },
1791 { MAP (sh_opcode8) },
1792 { MAP (sh_opcode9) },
1793 { MAP (sh_opcodea) },
1794 { MAP (sh_opcodeb) },
1795 { MAP (sh_opcodec) },
1796 { MAP (sh_opcoded) },
1797 { MAP (sh_opcodee) },
1798 { MAP (sh_opcodef) }
1801 /* Given an instruction, return a pointer to the corresponding
1802 sh_opcode structure. Return NULL if the instruction is not
1803 recognized.  */
1805 static const struct sh_opcode *
1809 const struct sh_major_opcode *maj;
1810 const struct sh_minor_opcode *min, *minend;
1812 maj = &sh_opcodes[(insn & 0xf000) >> 12];
1813 min = maj->minor_opcodes;
1814 minend = min + maj->count;
1815 for (; min < minend; min++)
1818 const struct sh_opcode *op, *opend;
1820 l = insn & min->mask;
1822 opend = op + min->count;
1824 /* Since the opcodes tables are sorted, we could use a binary
1825 search here if the count were above some cutoff value. */
1826 for (; op < opend; op++)
1827 if (op->opcode == l)
1834 /* See whether an instruction uses a general purpose register. */
1837 sh_insn_uses_reg (insn, op, reg)
1839 const struct sh_opcode *op;
1846 if ((f & USES1) != 0
1847 && ((insn & 0x0f00) >> 8) == reg)
1849 if ((f & USES2) != 0
1850 && ((insn & 0x00f0) >> 4) == reg)
1852 if ((f & USESR0) != 0
1859 /* See whether an instruction uses a floating point register. */
1862 sh_insn_uses_freg (insn, op, freg)
1864 const struct sh_opcode *op;
1871 if ((f & USESF1) != 0
1872 && ((insn & 0x0f00) >> 8) == freg)
1874 if ((f & USESF2) != 0
1875 && ((insn & 0x00f0) >> 4) == freg)
1877 if ((f & USESF0) != 0
1884 /* See whether instructions I1 and I2 conflict, assuming I1 comes
1885 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
1886 This should return true if there is a conflict, or false if the
1887 instructions can be swapped safely.  */
1889 sh_insns_conflict (i1, op1, i2, op2)
1891 const struct sh_opcode *op1;
1893 const struct sh_opcode *op2;
1895 unsigned int f1, f2;
1900 if ((f1 & (BRANCH | DELAY)) != 0
1901 || (f2 & (BRANCH | DELAY)) != 0)
1904 if ((f1 & SETSSP) != 0 && (f2 & USESSP) != 0)
1906 if ((f2 & SETSSP) != 0 && (f1 & USESSP) != 0)
1909 if ((f1 & SETS1) != 0
1910 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
1912 if ((f1 & SETS2) != 0
1913 && sh_insn_uses_reg (i2, op2, (i1 & 0x00f0) >> 4))
1915 if ((f1 & SETSR0) != 0
1916 && sh_insn_uses_reg (i2, op2, 0))
1918 if ((f1 & SETSF1) != 0
1919 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
1922 if ((f2 & SETS1) != 0
1923 && sh_insn_uses_reg (i1, op1, (i2 & 0x0f00) >> 8))
1925 if ((f2 & SETS2) != 0
1926 && sh_insn_uses_reg (i1, op1, (i2 & 0x00f0) >> 4))
1928 if ((f2 & SETSR0) != 0
1929 && sh_insn_uses_reg (i1, op1, 0))
1931 if ((f2 & SETSF1) != 0
1932 && sh_insn_uses_freg (i1, op1, (i2 & 0x0f00) >> 8))
1935 /* The instructions do not conflict. */
1939 /* I1 is a load instruction, and I2 is some other instruction. Return
1940 true if I1 loads a register which I2 uses. */
1943 sh_load_use (i1, op1, i2, op2)
1945 const struct sh_opcode *op1;
1947 const struct sh_opcode *op2;
1953 if ((f1 & LOAD) == 0)
1956 /* If both SETS1 and SETSSP are set, that means a load to a special
1957 register using postincrement addressing mode, which we don't care
1958 about here.  */
1959 if ((f1 & SETS1) != 0
1960 && (f1 & SETSSP) == 0
1961 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
1964 if ((f1 & SETSR0) != 0
1965 && sh_insn_uses_reg (i2, op2, 0))
1968 if ((f1 & SETSF1) != 0
1969 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
1975 /* Look for loads and stores which we can align to four byte
1976 boundaries. See the longer comment above sh_relax_section for why
1977 this is desirable.  This sets *PSWAPPED if some instruction was
1978 swapped.  */
1981 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
1984 struct internal_reloc *internal_relocs;
1988 struct internal_reloc *irel, *irelend;
1989 bfd_vma *labels = NULL;
1990 bfd_vma *label, *label_end;
1994 irelend = internal_relocs + sec->reloc_count;
1996 /* Get all the addresses with labels on them. */
1997 labels = (bfd_vma *) bfd_malloc (sec->reloc_count * sizeof (bfd_vma));
2001 for (irel = internal_relocs; irel < irelend; irel++)
2003 if (irel->r_type == R_SH_LABEL)
2005 *label_end = irel->r_vaddr - sec->vma;
2010 /* Note that the assembler currently always outputs relocs in
2011 address order. If that ever changes, this code will need to sort
2012 the label values and the relocs. */
2016 for (irel = internal_relocs; irel < irelend; irel++)
2018 bfd_vma start, stop, i;
2020 if (irel->r_type != R_SH_CODE)
2023 start = irel->r_vaddr - sec->vma;
2025 for (irel++; irel < irelend; irel++)
2026 if (irel->r_type == R_SH_DATA)
2029 stop = irel->r_vaddr - sec->vma;
2031 stop = sec->_cooked_size;
2033 /* Instructions should be aligned on 2 byte boundaries. */
2034 if ((start & 1) == 1)
2037 /* Now look through the unaligned addresses. */
2041 for (; i < stop; i += 4)
2044 const struct sh_opcode *op;
2045 unsigned int prev_insn = 0;
2046 const struct sh_opcode *prev_op = NULL;
2048 insn = bfd_get_16 (abfd, contents + i);
2049 op = sh_insn_info (insn);
2051 || (op->flags & (LOAD | STORE)) == 0)
2054 /* This is a load or store which is not on a four byte
2055 boundary.  */
2057 while (label < label_end && *label < i)
2062 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2063 prev_op = sh_insn_info (prev_insn);
2065 /* If the load/store instruction is in a delay slot, we
2066 can not swap it.  */
2067 if (prev_op == NULL
2068 || (prev_op->flags & DELAY) != 0)
2072 && (label >= label_end || *label != i)
2074 && (prev_op->flags & (LOAD | STORE)) == 0
2075 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2079 /* The load/store instruction does not have a label, and
2080 there is a previous instruction; PREV_INSN is not
2081 itself a load/store instruction, and PREV_INSN and
2082 INSN do not conflict. */
2088 unsigned int prev2_insn;
2089 const struct sh_opcode *prev2_op;
2091 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2092 prev2_op = sh_insn_info (prev2_insn);
2094 /* If the instruction before PREV_INSN has a delay
2095 slot--that is, PREV_INSN is in a delay slot--we
2096 can not do this swap.  */
2097 if (prev2_op == NULL
2098 || (prev2_op->flags & DELAY) != 0)
2101 /* If the instruction before PREV_INSN is a load,
2102 and it sets a register which INSN uses, then
2103 putting INSN immediately after PREV_INSN will
2104 cause a pipeline bubble, so there is no point to
2105 making the swap.  */
2107 && (prev2_op->flags & LOAD) != 0
2108 && sh_load_use (prev2_insn, prev2_op, insn, op))
2114 if (! sh_swap_insns (abfd, sec, internal_relocs,
2122 while (label < label_end && *label < i + 2)
2126 && (label >= label_end || *label != i + 2))
2128 unsigned int next_insn;
2129 const struct sh_opcode *next_op;
2131 /* There is an instruction after the load/store
2132 instruction, and it does not have a label. */
2133 next_insn = bfd_get_16 (abfd, contents + i + 2);
2134 next_op = sh_insn_info (next_insn);
2136 && (next_op->flags & (LOAD | STORE)) == 0
2137 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2141 /* NEXT_INSN is not itself a load/store instruction,
2142 and it does not conflict with INSN. */
2146 /* If PREV_INSN is a load, and it sets a register
2147 which NEXT_INSN uses, then putting NEXT_INSN
2148 immediately after PREV_INSN will cause a pipeline
2149 bubble, so there is no reason to make this swap. */
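/* (On SH the value fetched by a load is not available to the very
   next instruction without a stall; that interlock is what
   sh_load_use models.)  */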
2151 && (prev_op->flags & LOAD) != 0
2152 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2155 /* If INSN is a load, and it sets a register which
2156 the insn after NEXT_INSN uses, then doing the
2157 swap will cause a pipeline bubble, so there is no
2158 reason to make the swap. However, if the insn
2159 after NEXT_INSN is itself a load or store
2160 instruction, then it is misaligned, so
2161 optimistically hope that it will be swapped
2162 itself, and just live with the pipeline bubble if it is not. */
2166 && (op->flags & LOAD) != 0)
2168 unsigned int next2_insn;
2169 const struct sh_opcode *next2_op;
2171 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2172 next2_op = sh_insn_info (next2_insn);
2173 if ((next2_op->flags & (LOAD | STORE)) == 0
2174 && sh_load_use (insn, op, next2_insn, next2_op))
2180 if (! sh_swap_insns (abfd, sec, internal_relocs,
2201 /* Swap two SH instructions. */
2204 sh_swap_insns (abfd, sec, internal_relocs, contents, addr)
2207 struct internal_reloc *internal_relocs;
2211 unsigned short i1, i2;
2212 struct internal_reloc *irel, *irelend;
2214 /* Swap the instructions themselves. */
2215 i1 = bfd_get_16 (abfd, contents + addr);
2216 i2 = bfd_get_16 (abfd, contents + addr + 2);
2217 bfd_put_16 (abfd, i2, contents + addr);
2218 bfd_put_16 (abfd, i1, contents + addr + 2);
2220 /* Adjust all reloc addresses. */
2221 irelend = internal_relocs + sec->reloc_count;
2222 for (irel = internal_relocs; irel < irelend; irel++)
2226 /* There are a few special types of relocs that we don't want to
2227 adjust. These relocs do not apply to the instruction itself,
2228 but are only associated with the address. */
2229 type = irel->r_type;
2230 if (type == R_SH_ALIGN
2231 || type == R_SH_CODE
2232 || type == R_SH_DATA
2233 || type == R_SH_LABEL)
2236 /* If an R_SH_USES reloc points to one of the addresses being
2237 swapped, we must adjust it. It would be incorrect to do this
2238 for a jump, though, since we want to execute both
2239 instructions after the jump. (We have avoided swapping
2240 around a label, so the jump will not wind up executing an
2241 instruction it shouldn't). */
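/* For example, an R_SH_USES reloc at section offset 0x100 with an
   r_offset of 0x20 describes the load at offset 0x100 + 4 + 0x20 ==
   0x124.  If ADDR is 0x124 the load moves up to 0x126, so r_offset
   becomes 0x22; if ADDR is 0x122 the load is the second instruction
   of the pair and moves down to 0x122, so r_offset becomes 0x1e.  */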
2242 if (type == R_SH_USES)
2246 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2248 irel->r_offset += 2;
2249 else if (off == addr + 2)
2250 irel->r_offset -= 2;
2253 if (irel->r_vaddr - sec->vma == addr)
2258 else if (irel->r_vaddr - sec->vma == addr + 2)
2269 unsigned short insn, oinsn;
2272 loc = contents + irel->r_vaddr - sec->vma;
2279 case R_SH_PCDISP8BY2:
2280 case R_SH_PCRELIMM8BY2:
2281 insn = bfd_get_16 (abfd, loc);
2284 if ((oinsn & 0xff00) != (insn & 0xff00))
2286 bfd_put_16 (abfd, insn, loc);
2290 insn = bfd_get_16 (abfd, loc);
2293 if ((oinsn & 0xf000) != (insn & 0xf000))
2295 bfd_put_16 (abfd, insn, loc);
2298 case R_SH_PCRELIMM8BY4:
2299 /* This reloc ignores the least significant 3 bits of
2300 the program counter before adding in the offset.
2301 This means that if ADDR is on a four byte boundary, the
2302 swap will not affect the offset. If ADDR is not on a
2303 four byte boundary, the swap moves the instruction across
2304 a four byte boundary, and the offset must be adjusted. */
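/* For example, if ADDR is 0x104 the two instructions occupy 0x104 and
   0x106, which lie in the same four byte window, so exchanging them
   leaves the masked program counter, and hence the displacement,
   unchanged.  If ADDR is 0x106 the instructions occupy 0x106 and
   0x108, which lie in different four byte windows, so the displacement
   in the moved instruction must change by one longword.  */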
2305 if ((addr & 3) != 0)
2307 insn = bfd_get_16 (abfd, loc);
2310 if ((oinsn & 0xff00) != (insn & 0xff00))
2312 bfd_put_16 (abfd, insn, loc);
2320 ((*_bfd_error_handler)
2321 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
2322 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
2323 bfd_set_error (bfd_error_bad_value);
2332 /* This is a modification of _bfd_coff_generic_relocate_section, which
2333 will handle SH relaxing. */
2336 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2337 relocs, syms, sections)
2339 struct bfd_link_info *info;
2341 asection *input_section;
2343 struct internal_reloc *relocs;
2344 struct internal_syment *syms;
2345 asection **sections;
2347 struct internal_reloc *rel;
2348 struct internal_reloc *relend;
2351 relend = rel + input_section->reloc_count;
2352 for (; rel < relend; rel++)
2355 struct coff_link_hash_entry *h;
2356 struct internal_syment *sym;
2359 reloc_howto_type *howto;
2360 bfd_reloc_status_type rstat;
2362 /* Almost all relocs have to do with relaxing. If any work must
2363 be done for them, it has been done in sh_relax_section. */
2364 if (rel->r_type != R_SH_IMM32
2365 && rel->r_type != R_SH_PCDISP)
2368 symndx = rel->r_symndx;
2377 h = obj_coff_sym_hashes (input_bfd)[symndx];
2378 sym = syms + symndx;
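/* The section contents already hold the symbol's old value plus the
   addend, so back the old value out here; _bfd_final_link_relocate
   below adds the symbol's new value back in.  */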
2381 if (sym != NULL && sym->n_scnum != 0)
2382 addend = - sym->n_value;
2386 if (rel->r_type == R_SH_PCDISP)
2389 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2392 howto = &sh_coff_howtos[rel->r_type];
2396 bfd_set_error (bfd_error_bad_value);
2406 /* There is nothing to do for an internal PCDISP reloc. */
2407 if (rel->r_type == R_SH_PCDISP)
2412 sec = bfd_abs_section_ptr;
2417 sec = sections[symndx];
2418 val = (sec->output_section->vma
2419 + sec->output_offset
2426 if (h->root.type == bfd_link_hash_defined
2427 || h->root.type == bfd_link_hash_defweak)
2431 sec = h->root.u.def.section;
2432 val = (h->root.u.def.value
2433 + sec->output_section->vma
2434 + sec->output_offset);
2436 else if (! info->relocateable)
2438 if (! ((*info->callbacks->undefined_symbol)
2439 (info, h->root.root.string, input_bfd, input_section,
2440 rel->r_vaddr - input_section->vma)))
2445 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2447 rel->r_vaddr - input_section->vma,
2456 case bfd_reloc_overflow:
2459 char buf[SYMNMLEN + 1];
2464 name = h->root.root.string;
2465 else if (sym->_n._n_n._n_zeroes == 0
2466 && sym->_n._n_n._n_offset != 0)
2467 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2470 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2471 buf[SYMNMLEN] = '\0';
2475 if (! ((*info->callbacks->reloc_overflow)
2476 (info, name, howto->name, (bfd_vma) 0, input_bfd,
2477 input_section, rel->r_vaddr - input_section->vma)))
2486 /* This is a version of bfd_generic_get_relocated_section_contents
2487 which uses sh_relocate_section. */
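/* The interesting case is a section whose contents have been cached in
   coff_section_data (normally because sh_relax_section rewrote them);
   it is that cached copy, rather than what is on disk, which gets
   relocated below.  */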
2490 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2491 data, relocateable, symbols)
2493 struct bfd_link_info *link_info;
2494 struct bfd_link_order *link_order;
2496 boolean relocateable;
2499 asection *input_section = link_order->u.indirect.section;
2500 bfd *input_bfd = input_section->owner;
2501 asection **sections = NULL;
2502 struct internal_reloc *internal_relocs = NULL;
2503 struct internal_syment *internal_syms = NULL;
2505 /* We only need to handle the case of relaxing, or of having a
2506 particular set of section contents, specially. */
2508 || coff_section_data (input_bfd, input_section) == NULL
2509 || coff_section_data (input_bfd, input_section)->contents == NULL)
2510 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2515 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2516 input_section->_raw_size);
2518 if ((input_section->flags & SEC_RELOC) != 0
2519 && input_section->reloc_count > 0)
2521 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2522 bfd_byte *esym, *esymend;
2523 struct internal_syment *isymp;
2526 if (! _bfd_coff_get_external_symbols (input_bfd))
2529 internal_relocs = (_bfd_coff_read_internal_relocs
2530 (input_bfd, input_section, false, (bfd_byte *) NULL,
2531 false, (struct internal_reloc *) NULL));
2532 if (internal_relocs == NULL)
2535 internal_syms = ((struct internal_syment *)
2536 bfd_malloc (obj_raw_syment_count (input_bfd)
2537 * sizeof (struct internal_syment)));
2538 if (internal_syms == NULL)
2541 sections = (asection **) bfd_malloc (obj_raw_syment_count (input_bfd)
2542 * sizeof (asection *));
2543 if (sections == NULL)
2546 isymp = internal_syms;
2548 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2549 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
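/* Build a map from symbol index to section: a nonzero n_scnum names a
   real section (or one of the absolute/debug pseudo sections), while a
   zero n_scnum is an undefined external when the value is zero and a
   common symbol otherwise.  */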
2550 while (esym < esymend)
2552 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
2554 if (isymp->n_scnum != 0)
2555 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2558 if (isymp->n_value == 0)
2559 *secpp = bfd_und_section_ptr;
2561 *secpp = bfd_com_section_ptr;
2564 esym += (isymp->n_numaux + 1) * symesz;
2565 secpp += isymp->n_numaux + 1;
2566 isymp += isymp->n_numaux + 1;
2569 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2570 input_section, data, internal_relocs,
2571 internal_syms, sections))
2576 free (internal_syms);
2577 internal_syms = NULL;
2578 free (internal_relocs);
2579 internal_relocs = NULL;
2585 if (internal_relocs != NULL)
2586 free (internal_relocs);
2587 if (internal_syms != NULL)
2588 free (internal_syms);
2589 if (sections != NULL)
2594 /* The target vectors. */
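/* Example (not part of this back end): a BFD client can select one of
   these vectors explicitly by passing its name to bfd_openr; the file
   name below is only a placeholder.  */
#if 0
#include "bfd.h"

static bfd *
open_sh_coff (filename, little_endian)
     const char *filename;
     int little_endian;
{
  bfd *abfd;

  abfd = bfd_openr (filename, little_endian ? "coff-shl" : "coff-sh");
  if (abfd == NULL)
    return NULL;
  if (! bfd_check_format (abfd, bfd_object))
    {
      bfd_close (abfd);
      return NULL;
    }
  return abfd;
}
#endif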
2596 const bfd_target shcoff_vec =
2598 "coff-sh", /* name */
2599 bfd_target_coff_flavour,
2600 BFD_ENDIAN_BIG, /* data byte order is big */
2601 BFD_ENDIAN_BIG, /* header byte order is big */
2603 (HAS_RELOC | EXEC_P | /* object flags */
2604 HAS_LINENO | HAS_DEBUG |
2605 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
2607 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
2608 '_', /* leading symbol underscore */
2609 '/', /* ar_pad_char */
2610 15, /* ar_max_namelen */
2611 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
2612 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
2613 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
2614 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
2615 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
2616 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
2618 {_bfd_dummy_target, coff_object_p, /* bfd_check_format */
2619 bfd_generic_archive_p, _bfd_dummy_target},
2620 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
2622 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
2623 _bfd_write_archive_contents, bfd_false},
2625 BFD_JUMP_TABLE_GENERIC (coff),
2626 BFD_JUMP_TABLE_COPY (coff),
2627 BFD_JUMP_TABLE_CORE (_bfd_nocore),
2628 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
2629 BFD_JUMP_TABLE_SYMBOLS (coff),
2630 BFD_JUMP_TABLE_RELOCS (coff),
2631 BFD_JUMP_TABLE_WRITE (coff),
2632 BFD_JUMP_TABLE_LINK (coff),
2633 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
2638 const bfd_target shlcoff_vec =
2640 "coff-shl", /* name */
2641 bfd_target_coff_flavour,
2642 BFD_ENDIAN_LITTLE, /* data byte order is little */
2643 BFD_ENDIAN_LITTLE, /* header byte order is little endian too */
2645 (HAS_RELOC | EXEC_P | /* object flags */
2646 HAS_LINENO | HAS_DEBUG |
2647 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
2649 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
2650 '_', /* leading symbol underscore */
2651 '/', /* ar_pad_char */
2652 15, /* ar_max_namelen */
2653 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
2654 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
2655 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
2656 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
2657 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
2658 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
2660 {_bfd_dummy_target, coff_object_p, /* bfd_check_format */
2661 bfd_generic_archive_p, _bfd_dummy_target},
2662 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
2664 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
2665 _bfd_write_archive_contents, bfd_false},
2667 BFD_JUMP_TABLE_GENERIC (coff),
2668 BFD_JUMP_TABLE_COPY (coff),
2669 BFD_JUMP_TABLE_CORE (_bfd_nocore),
2670 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
2671 BFD_JUMP_TABLE_SYMBOLS (coff),
2672 BFD_JUMP_TABLE_RELOCS (coff),
2673 BFD_JUMP_TABLE_WRITE (coff),
2674 BFD_JUMP_TABLE_LINK (coff),
2675 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),