1 /* BFD back-end for Hitachi Super-H COFF binaries.
2 Copyright 1993, 94, 95, 96, 97, 98, 1999, 2000 Free Software Foundation, Inc.
3 Contributed by Cygnus Support.
7 This file is part of BFD, the Binary File Descriptor library.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
28 #include "coff/internal.h"
31 /* Internal functions. */
32 static bfd_reloc_status_type sh_reloc
33 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
34 static long get_symbol_value PARAMS ((asymbol *));
35 static boolean sh_relax_section
36 PARAMS ((bfd *, asection *, struct bfd_link_info *, boolean *));
37 static boolean sh_relax_delete_bytes
38 PARAMS ((bfd *, asection *, bfd_vma, int));
39 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
40 static boolean sh_align_loads
41 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, boolean *));
42 static boolean sh_swap_insns
43 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
44 static boolean sh_relocate_section
45 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
46 struct internal_reloc *, struct internal_syment *, asection **));
47 static bfd_byte *sh_coff_get_relocated_section_contents
48 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
49 bfd_byte *, boolean, asymbol **));
51 /* Default section alignment to 2**4. */
52 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER (4)
54 /* Generate long file names. */
55 #define COFF_LONG_FILENAMES
57 /* The supported relocations. There are a lot of relocations defined
58 in coff/internal.h which we do not expect to ever see. */
59 static reloc_howto_type sh_coff_howtos[] =
64 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
65 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
66 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
67 EMPTY_HOWTO (6), /* R_SH_IMM24 */
68 EMPTY_HOWTO (7), /* R_SH_LOW16 */
70 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
72 HOWTO (R_SH_PCDISP8BY2, /* type */
74 1, /* size (0 = byte, 1 = short, 2 = long) */
76 true, /* pc_relative */
78 complain_overflow_signed, /* complain_on_overflow */
79 sh_reloc, /* special_function */
80 "r_pcdisp8by2", /* name */
81 true, /* partial_inplace */
84 true), /* pcrel_offset */
86 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
88 HOWTO (R_SH_PCDISP, /* type */
90 1, /* size (0 = byte, 1 = short, 2 = long) */
92 true, /* pc_relative */
94 complain_overflow_signed, /* complain_on_overflow */
95 sh_reloc, /* special_function */
96 "r_pcdisp12by2", /* name */
97 true, /* partial_inplace */
100 true), /* pcrel_offset */
104 HOWTO (R_SH_IMM32, /* type */
106 2, /* size (0 = byte, 1 = short, 2 = long) */
108 false, /* pc_relative */
110 complain_overflow_bitfield, /* complain_on_overflow */
111 sh_reloc, /* special_function */
112 "r_imm32", /* name */
113 true, /* partial_inplace */
114 0xffffffff, /* src_mask */
115 0xffffffff, /* dst_mask */
116 false), /* pcrel_offset */
119 EMPTY_HOWTO (16), /* R_SH_IMM8 */
120 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
121 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
122 EMPTY_HOWTO (19), /* R_SH_IMM4 */
123 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
124 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
126 HOWTO (R_SH_PCRELIMM8BY2, /* type */
128 1, /* size (0 = byte, 1 = short, 2 = long) */
130 true, /* pc_relative */
132 complain_overflow_unsigned, /* complain_on_overflow */
133 sh_reloc, /* special_function */
134 "r_pcrelimm8by2", /* name */
135 true, /* partial_inplace */
138 true), /* pcrel_offset */
140 HOWTO (R_SH_PCRELIMM8BY4, /* type */
142 1, /* size (0 = byte, 1 = short, 2 = long) */
144 true, /* pc_relative */
146 complain_overflow_unsigned, /* complain_on_overflow */
147 sh_reloc, /* special_function */
148 "r_pcrelimm8by4", /* name */
149 true, /* partial_inplace */
152 true), /* pcrel_offset */
154 HOWTO (R_SH_IMM16, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 false, /* pc_relative */
160 complain_overflow_bitfield, /* complain_on_overflow */
161 sh_reloc, /* special_function */
162 "r_imm16", /* name */
163 true, /* partial_inplace */
164 0xffff, /* src_mask */
165 0xffff, /* dst_mask */
166 false), /* pcrel_offset */
168 HOWTO (R_SH_SWITCH16, /* type */
170 1, /* size (0 = byte, 1 = short, 2 = long) */
172 false, /* pc_relative */
174 complain_overflow_bitfield, /* complain_on_overflow */
175 sh_reloc, /* special_function */
176 "r_switch16", /* name */
177 true, /* partial_inplace */
178 0xffff, /* src_mask */
179 0xffff, /* dst_mask */
180 false), /* pcrel_offset */
182 HOWTO (R_SH_SWITCH32, /* type */
184 2, /* size (0 = byte, 1 = short, 2 = long) */
186 false, /* pc_relative */
188 complain_overflow_bitfield, /* complain_on_overflow */
189 sh_reloc, /* special_function */
190 "r_switch32", /* name */
191 true, /* partial_inplace */
192 0xffffffff, /* src_mask */
193 0xffffffff, /* dst_mask */
194 false), /* pcrel_offset */
196 HOWTO (R_SH_USES, /* type */
198 1, /* size (0 = byte, 1 = short, 2 = long) */
200 false, /* pc_relative */
202 complain_overflow_bitfield, /* complain_on_overflow */
203 sh_reloc, /* special_function */
204 "r_uses", /* name */
205 true, /* partial_inplace */
206 0xffff, /* src_mask */
207 0xffff, /* dst_mask */
208 false), /* pcrel_offset */
210 HOWTO (R_SH_COUNT, /* type */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
214 false, /* pc_relative */
216 complain_overflow_bitfield, /* complain_on_overflow */
217 sh_reloc, /* special_function */
218 "r_count", /* name */
219 true, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 false), /* pcrel_offset */
224 HOWTO (R_SH_ALIGN, /* type */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
228 false, /* pc_relative */
230 complain_overflow_bitfield, /* complain_on_overflow */
231 sh_reloc, /* special_function */
232 "r_align", /* name */
233 true, /* partial_inplace */
234 0xffffffff, /* src_mask */
235 0xffffffff, /* dst_mask */
236 false), /* pcrel_offset */
238 HOWTO (R_SH_CODE, /* type */
240 2, /* size (0 = byte, 1 = short, 2 = long) */
242 false, /* pc_relative */
244 complain_overflow_bitfield, /* complain_on_overflow */
245 sh_reloc, /* special_function */
246 "r_code", /* name */
247 true, /* partial_inplace */
248 0xffffffff, /* src_mask */
249 0xffffffff, /* dst_mask */
250 false), /* pcrel_offset */
252 HOWTO (R_SH_DATA, /* type */
254 2, /* size (0 = byte, 1 = short, 2 = long) */
256 false, /* pc_relative */
258 complain_overflow_bitfield, /* complain_on_overflow */
259 sh_reloc, /* special_function */
260 "r_data", /* name */
261 true, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 false), /* pcrel_offset */
266 HOWTO (R_SH_LABEL, /* type */
268 2, /* size (0 = byte, 1 = short, 2 = long) */
270 false, /* pc_relative */
272 complain_overflow_bitfield, /* complain_on_overflow */
273 sh_reloc, /* special_function */
274 "r_label", /* name */
275 true, /* partial_inplace */
276 0xffffffff, /* src_mask */
277 0xffffffff, /* dst_mask */
278 false), /* pcrel_offset */
280 HOWTO (R_SH_SWITCH8, /* type */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
284 false, /* pc_relative */
286 complain_overflow_bitfield, /* complain_on_overflow */
287 sh_reloc, /* special_function */
288 "r_switch8", /* name */
289 true, /* partial_inplace */
292 false) /* pcrel_offset */
295 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
297 /* Check for a bad magic number. */
298 #define BADMAG(x) SHBADMAG(x)
300 /* Customize coffcode.h (this is not currently used). */
303 /* FIXME: This should not be set here. */
304 #define __A_MAGIC_SET__
306 /* Swap the r_offset field in and out. */
307 #define SWAP_IN_RELOC_OFFSET bfd_h_get_32
308 #define SWAP_OUT_RELOC_OFFSET bfd_h_put_32
310 /* Swap out extra information in the reloc structure. */
311 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
314 dst->r_stuff[0] = 'S'; \
315 dst->r_stuff[1] = 'C'; \
319 /* Get the value of a symbol, when performing a relocation. */
322 get_symbol_value (symbol)
327 if (bfd_is_com_section (symbol->section))
330 relocation = (symbol->value +
331 symbol->section->output_section->vma +
332 symbol->section->output_offset);
337 /* This macro is used in coffcode.h to get the howto corresponding to
338 an internal reloc. */
340 #define RTYPE2HOWTO(relent, internal) \
342 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
343 ? &sh_coff_howtos[(internal)->r_type] \
344 : (reloc_howto_type *) NULL))
346 /* This is the same as the macro in coffcode.h, except that it copies
347 r_offset into reloc_entry->addend for some relocs. */
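/* Note: for the relocs handled specially below (the R_SH_SWITCH
   types, R_SH_USES, R_SH_COUNT and R_SH_ALIGN), r_offset carries
   auxiliary data rather than part of an address -- for example a PC
   relative offset to a register load (R_SH_USES), a use count
   (R_SH_COUNT) or an alignment power (R_SH_ALIGN) -- so its value is
   preserved in the canonical arelent's addend.  */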
348 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
350 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
351 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
352 coffsym = (obj_symbols (abfd) \
353 + (cache_ptr->sym_ptr_ptr - symbols)); \
355 coffsym = coff_symbol_from (abfd, ptr); \
356 if (coffsym != (coff_symbol_type *) NULL \
357 && coffsym->native->u.syment.n_scnum == 0) \
358 cache_ptr->addend = 0; \
359 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
360 && ptr->section != (asection *) NULL) \
361 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
363 cache_ptr->addend = 0; \
364 if ((reloc).r_type == R_SH_SWITCH8 \
365 || (reloc).r_type == R_SH_SWITCH16 \
366 || (reloc).r_type == R_SH_SWITCH32 \
367 || (reloc).r_type == R_SH_USES \
368 || (reloc).r_type == R_SH_COUNT \
369 || (reloc).r_type == R_SH_ALIGN) \
370 cache_ptr->addend = (reloc).r_offset; \
373 /* This is the howto function for the SH relocations. */
375 static bfd_reloc_status_type
376 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
379 arelent *reloc_entry;
382 asection *input_section;
384 char **error_message ATTRIBUTE_UNUSED;
388 unsigned short r_type;
389 bfd_vma addr = reloc_entry->address;
390 bfd_byte *hit_data = addr + (bfd_byte *) data;
392 r_type = reloc_entry->howto->type;
394 if (output_bfd != NULL)
396 /* Partial linking--do nothing. */
397 reloc_entry->address += input_section->output_offset;
401 /* Almost all relocs have to do with relaxing. If any work must be
402 done for them, it has been done in sh_relax_section. */
403 if (r_type != R_SH_IMM32
404 && (r_type != R_SH_PCDISP
405 || (symbol_in->flags & BSF_LOCAL) != 0))
408 if (symbol_in != NULL
409 && bfd_is_und_section (symbol_in->section))
410 return bfd_reloc_undefined;
412 sym_value = get_symbol_value (symbol_in);
417 insn = bfd_get_32 (abfd, hit_data);
418 insn += sym_value + reloc_entry->addend;
419 bfd_put_32 (abfd, insn, hit_data);
422 insn = bfd_get_16 (abfd, hit_data);
423 sym_value += reloc_entry->addend;
424 sym_value -= (input_section->output_section->vma
425 + input_section->output_offset
428 sym_value += (insn & 0xfff) << 1;
431 insn = (insn & 0xf000) | (sym_value & 0xfff);
432 bfd_put_16 (abfd, insn, hit_data);
433 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
434 return bfd_reloc_overflow;
444 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
446 /* We can do relaxing. */
447 #define coff_bfd_relax_section sh_relax_section
449 /* We use the special COFF backend linker. */
450 #define coff_relocate_section sh_relocate_section
452 /* When relaxing, we need to use special code to get the relocated
453 section contents. */
454 #define coff_bfd_get_relocated_section_contents \
455 sh_coff_get_relocated_section_contents
457 #include "coffcode.h"
459 /* This function handles relaxing on the SH.
461 Function calls on the SH look like this:

       mov.l  L1,r0
       ...
       jsr    @r0
       ...
     L1:
       .long  function
470 The compiler and assembler will cooperate to create R_SH_USES
471 relocs on the jsr instructions. The r_offset field of the
472 R_SH_USES reloc is the PC relative offset to the instruction which
473 loads the register (the r_offset field is computed as though it
474 were a jump instruction, so the offset value is actually from four
475 bytes past the instruction). The linker can use this reloc to
476 determine just which function is being called, and thus decide
477 whether it is possible to replace the jsr with a bsr.
479 If multiple function calls are all based on a single register load
480 (i.e., the same function is called multiple times), the compiler
481 guarantees that each function call will have an R_SH_USES reloc.
482 Therefore, if the linker is able to convert each R_SH_USES reloc
483 which refers to that address, it can safely eliminate the register
484 load.
486 When the assembler creates an R_SH_USES reloc, it examines it to
487 determine which address is being loaded (L1 in the above example).
488 It then counts the number of references to that address, and
489 creates an R_SH_COUNT reloc at that address. The r_offset field of
490 the R_SH_COUNT reloc will be the number of references. If the
491 linker is able to eliminate a register load, it can use the
492 R_SH_COUNT reloc to see whether it can also eliminate the function
493 address.
495 SH relaxing also handles another, unrelated, matter. On the SH, if
496 a load or store instruction is not aligned on a four byte boundary,
497 the memory cycle interferes with the 32 bit instruction fetch,
498 causing a one cycle bubble in the pipeline. Therefore, we try to
499 align load and store instructions on four byte boundaries if we
500 can, by swapping them with one of the adjacent instructions. */
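/* Illustrative sketch (added; not part of the original comment):
   when every R_SH_USES reference to L1 in the sequence above can be
   converted, the

       jsr    @r0

   is rewritten as

       bsr    function          ! reloc becomes R_SH_PCDISP

   the two byte "mov.l L1,r0" load is deleted, and the four byte
   ".long function" constant at L1 is deleted as well once the
   R_SH_COUNT use count reaches zero.  */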
503 sh_relax_section (abfd, sec, link_info, again)
506 struct bfd_link_info *link_info;
509 struct internal_reloc *internal_relocs;
510 struct internal_reloc *free_relocs = NULL;
512 struct internal_reloc *irel, *irelend;
513 bfd_byte *contents = NULL;
514 bfd_byte *free_contents = NULL;
518 if (link_info->relocateable
519 || (sec->flags & SEC_RELOC) == 0
520 || sec->reloc_count == 0)
523 /* If this is the first time we have been called for this section,
524 initialize the cooked size. */
525 if (sec->_cooked_size == 0)
526 sec->_cooked_size = sec->_raw_size;
528 internal_relocs = (_bfd_coff_read_internal_relocs
529 (abfd, sec, link_info->keep_memory,
530 (bfd_byte *) NULL, false,
531 (struct internal_reloc *) NULL));
532 if (internal_relocs == NULL)
534 if (! link_info->keep_memory)
535 free_relocs = internal_relocs;
539 irelend = internal_relocs + sec->reloc_count;
540 for (irel = internal_relocs; irel < irelend; irel++)
542 bfd_vma laddr, paddr, symval;
544 struct internal_reloc *irelfn, *irelscan, *irelcount;
545 struct internal_syment sym;
548 if (irel->r_type == R_SH_CODE)
551 if (irel->r_type != R_SH_USES)
554 /* Get the section contents. */
555 if (contents == NULL)
557 if (coff_section_data (abfd, sec) != NULL
558 && coff_section_data (abfd, sec)->contents != NULL)
559 contents = coff_section_data (abfd, sec)->contents;
562 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
563 if (contents == NULL)
565 free_contents = contents;
567 if (! bfd_get_section_contents (abfd, sec, contents,
568 (file_ptr) 0, sec->_raw_size))
573 /* The r_offset field of the R_SH_USES reloc will point us to
574 the register load. The 4 is because the r_offset field is
575 computed as though it were a jump offset, which is measured
576 from 4 bytes after the jump instruction. */
577 laddr = irel->r_vaddr - sec->vma + 4;
578 /* Careful to sign extend the 32-bit offset. */
579 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
580 if (laddr >= sec->_raw_size)
582 (*_bfd_error_handler) ("%s: 0x%lx: warning: bad R_SH_USES offset",
583 bfd_get_filename (abfd),
584 (unsigned long) irel->r_vaddr);
587 insn = bfd_get_16 (abfd, contents + laddr);
589 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
590 if ((insn & 0xf000) != 0xd000)
592 ((*_bfd_error_handler)
593 ("%s: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
594 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr, insn));
598 /* Get the address from which the register is being loaded. The
599 displacement in the mov.l instruction is quadrupled. It is a
600 displacement from four bytes after the mov.l instruction, but,
601 before adding in the PC address, the two least significant bits
602 of the PC are cleared. We assume that the section is aligned
603 on a four byte boundary. */
606 paddr += (laddr + 4) &~ 3;
607 if (paddr >= sec->_raw_size)
609 ((*_bfd_error_handler)
610 ("%s: 0x%lx: warning: bad R_SH_USES load offset",
611 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
615 /* Get the reloc for the address from which the register is
616 being loaded. This reloc will tell us which function is
617 actually being called. */
619 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
620 if (irelfn->r_vaddr == paddr
621 && irelfn->r_type == R_SH_IMM32)
623 if (irelfn >= irelend)
625 ((*_bfd_error_handler)
626 ("%s: 0x%lx: warning: could not find expected reloc",
627 bfd_get_filename (abfd), (unsigned long) paddr));
631 /* Get the value of the symbol referred to by the reloc. */
632 if (! _bfd_coff_get_external_symbols (abfd))
634 bfd_coff_swap_sym_in (abfd,
635 ((bfd_byte *) obj_coff_external_syms (abfd)
637 * bfd_coff_symesz (abfd))),
639 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
641 ((*_bfd_error_handler)
642 ("%s: 0x%lx: warning: symbol in unexpected section",
643 bfd_get_filename (abfd), (unsigned long) paddr));
647 if (sym.n_sclass != C_EXT)
649 symval = (sym.n_value
651 + sec->output_section->vma
652 + sec->output_offset);
656 struct coff_link_hash_entry *h;
658 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
659 BFD_ASSERT (h != NULL);
660 if (h->root.type != bfd_link_hash_defined
661 && h->root.type != bfd_link_hash_defweak)
663 /* This appears to be a reference to an undefined
664 symbol. Just ignore it--it will be caught by the
665 regular reloc processing. */
669 symval = (h->root.u.def.value
670 + h->root.u.def.section->output_section->vma
671 + h->root.u.def.section->output_offset);
674 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
676 /* See if this function call can be shortened. */
680 + sec->output_section->vma
683 if (foff < -0x1000 || foff >= 0x1000)
685 /* After all that work, we can't shorten this function call. */
689 /* Shorten the function call. */
691 /* For simplicity of coding, we are going to modify the section
692 contents, the section relocs, and the BFD symbol table. We
693 must tell the rest of the code not to free up this
694 information. It would be possible to instead create a table
695 of changes which have to be made, as is done in coff-mips.c;
696 that would be more work, but would require less memory when
697 the linker is run. */
699 if (coff_section_data (abfd, sec) == NULL)
702 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
703 if (sec->used_by_bfd == NULL)
707 coff_section_data (abfd, sec)->relocs = internal_relocs;
708 coff_section_data (abfd, sec)->keep_relocs = true;
711 coff_section_data (abfd, sec)->contents = contents;
712 coff_section_data (abfd, sec)->keep_contents = true;
713 free_contents = NULL;
715 obj_coff_keep_syms (abfd) = true;
717 /* Replace the jsr with a bsr. */
719 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
720 replace the jsr with a bsr. */
721 irel->r_type = R_SH_PCDISP;
722 irel->r_symndx = irelfn->r_symndx;
723 if (sym.n_sclass != C_EXT)
725 /* If this needs to be changed because of future relaxing,
726 it will be handled here like other internal PCDISP
727 relocs. */
728 bfd_put_16 (abfd,
729 0xb000 | ((foff >> 1) & 0xfff),
730 contents + irel->r_vaddr - sec->vma);
734 /* We can't fully resolve this yet, because the external
735 symbol value may be changed by future relaxing. We let
736 the final link phase handle it. */
737 bfd_put_16 (abfd, 0xb000, contents + irel->r_vaddr - sec->vma);
740 /* See if there is another R_SH_USES reloc referring to the same
741 register load. */
742 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
743 if (irelscan->r_type == R_SH_USES
744 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
746 if (irelscan < irelend)
748 /* Some other function call depends upon this register load,
749 and we have not yet converted that function call.
750 Indeed, we may never be able to convert it. There is
751 nothing else we can do at this point. */
755 /* Look for a R_SH_COUNT reloc on the location where the
756 function address is stored. Do this before deleting any
757 bytes, to avoid confusion about the address. */
758 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
759 if (irelcount->r_vaddr == paddr
760 && irelcount->r_type == R_SH_COUNT)
763 /* Delete the register load. */
764 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
767 /* That will change things, so, just in case it permits some
768 other function call to come within range, we should relax
769 again. Note that this is not required, and it may be slow. */
772 /* Now check whether we got a COUNT reloc. */
773 if (irelcount >= irelend)
775 ((*_bfd_error_handler)
776 ("%s: 0x%lx: warning: could not find expected COUNT reloc",
777 bfd_get_filename (abfd), (unsigned long) paddr));
781 /* The number of uses is stored in the r_offset field. We've
782 just deleted one use. */
783 if (irelcount->r_offset == 0)
785 ((*_bfd_error_handler) ("%s: 0x%lx: warning: bad count",
786 bfd_get_filename (abfd),
787 (unsigned long) paddr));
791 --irelcount->r_offset;
793 /* If there are no more uses, we can delete the address. Reload
794 the address from irelfn, in case it was changed by the
795 previous call to sh_relax_delete_bytes. */
796 if (irelcount->r_offset == 0)
798 if (! sh_relax_delete_bytes (abfd, sec,
799 irelfn->r_vaddr - sec->vma, 4))
803 /* We've done all we can with that function call. */
806 /* Look for load and store instructions that we can align on four
807 byte boundaries. */
812 /* Get the section contents. */
813 if (contents == NULL)
815 if (coff_section_data (abfd, sec) != NULL
816 && coff_section_data (abfd, sec)->contents != NULL)
817 contents = coff_section_data (abfd, sec)->contents;
820 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
821 if (contents == NULL)
823 free_contents = contents;
825 if (! bfd_get_section_contents (abfd, sec, contents,
826 (file_ptr) 0, sec->_raw_size))
831 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
836 if (coff_section_data (abfd, sec) == NULL)
839 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
840 if (sec->used_by_bfd == NULL)
844 coff_section_data (abfd, sec)->relocs = internal_relocs;
845 coff_section_data (abfd, sec)->keep_relocs = true;
848 coff_section_data (abfd, sec)->contents = contents;
849 coff_section_data (abfd, sec)->keep_contents = true;
850 free_contents = NULL;
852 obj_coff_keep_syms (abfd) = true;
856 if (free_relocs != NULL)
862 if (free_contents != NULL)
864 if (! link_info->keep_memory)
865 free (free_contents);
868 /* Cache the section contents for coff_link_input_bfd. */
869 if (coff_section_data (abfd, sec) == NULL)
872 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
873 if (sec->used_by_bfd == NULL)
875 coff_section_data (abfd, sec)->relocs = NULL;
877 coff_section_data (abfd, sec)->contents = contents;
884 if (free_relocs != NULL)
886 if (free_contents != NULL)
887 free (free_contents);
891 /* Delete some bytes from a section while relaxing. */
894 sh_relax_delete_bytes (abfd, sec, addr, count)
901 struct internal_reloc *irel, *irelend;
902 struct internal_reloc *irelalign;
904 bfd_byte *esym, *esymend;
905 bfd_size_type symesz;
906 struct coff_link_hash_entry **sym_hash;
909 contents = coff_section_data (abfd, sec)->contents;
911 /* The deletion must stop at the next ALIGN reloc for an alignment
912 power larger than the number of bytes we are deleting. */
915 toaddr = sec->_cooked_size;
917 irel = coff_section_data (abfd, sec)->relocs;
918 irelend = irel + sec->reloc_count;
919 for (; irel < irelend; irel++)
921 if (irel->r_type == R_SH_ALIGN
922 && irel->r_vaddr - sec->vma > addr
923 && count < (1 << irel->r_offset))
926 toaddr = irel->r_vaddr - sec->vma;
931 /* Actually delete the bytes. */
932 memmove (contents + addr, contents + addr + count, toaddr - addr - count);
933 if (irelalign == NULL)
934 sec->_cooked_size -= count;
939 #define NOP_OPCODE (0x0009)
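/* The COUNT bytes vacated just below TOADDR by the memmove above are
   filled with NOP instructions.  */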
941 BFD_ASSERT ((count & 1) == 0);
942 for (i = 0; i < count; i += 2)
943 bfd_put_16 (abfd, NOP_OPCODE, contents + toaddr - count + i);
946 /* Adjust all the relocs. */
947 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
949 bfd_vma nraddr, stop;
952 struct internal_syment sym;
953 int off, adjust, oinsn;
954 bfd_signed_vma voff = 0;
957 /* Get the new reloc address. */
958 nraddr = irel->r_vaddr - sec->vma;
959 if ((irel->r_vaddr - sec->vma > addr
960 && irel->r_vaddr - sec->vma < toaddr)
961 || (irel->r_type == R_SH_ALIGN
962 && irel->r_vaddr - sec->vma == toaddr))
965 /* See if this reloc was for the bytes we have deleted, in which
966 case we no longer care about it. Don't delete relocs which
967 represent addresses, though. */
968 if (irel->r_vaddr - sec->vma >= addr
969 && irel->r_vaddr - sec->vma < addr + count
970 && irel->r_type != R_SH_ALIGN
971 && irel->r_type != R_SH_CODE
972 && irel->r_type != R_SH_DATA
973 && irel->r_type != R_SH_LABEL)
974 irel->r_type = R_SH_UNUSED;
976 /* If this is a PC relative reloc, see if the range it covers
977 includes the bytes we have deleted. */
978 switch (irel->r_type)
983 case R_SH_PCDISP8BY2:
985 case R_SH_PCRELIMM8BY2:
986 case R_SH_PCRELIMM8BY4:
987 start = irel->r_vaddr - sec->vma;
988 insn = bfd_get_16 (abfd, contents + nraddr);
992 switch (irel->r_type)
999 /* If this reloc is against a symbol defined in this
1000 section, and the symbol will not be adjusted below, we
1001 must check the addend to see if it will put the value in
1002 range to be adjusted, and hence must be changed. */
1003 bfd_coff_swap_sym_in (abfd,
1004 ((bfd_byte *) obj_coff_external_syms (abfd)
1006 * bfd_coff_symesz (abfd))),
1008 if (sym.n_sclass != C_EXT
1009 && sym.n_scnum == sec->target_index
1010 && ((bfd_vma) sym.n_value <= addr
1011 || (bfd_vma) sym.n_value >= toaddr))
1015 val = bfd_get_32 (abfd, contents + nraddr);
1017 if (val > addr && val < toaddr)
1018 bfd_put_32 (abfd, val - count, contents + nraddr);
1020 start = stop = addr;
1023 case R_SH_PCDISP8BY2:
1027 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1031 bfd_coff_swap_sym_in (abfd,
1032 ((bfd_byte *) obj_coff_external_syms (abfd)
1034 * bfd_coff_symesz (abfd))),
1036 if (sym.n_sclass == C_EXT)
1037 start = stop = addr;
1043 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1047 case R_SH_PCRELIMM8BY2:
1049 stop = start + 4 + off * 2;
1052 case R_SH_PCRELIMM8BY4:
1054 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1060 /* These reloc types represent
1061 .word L2-L1
1062 The r_offset field holds the difference between the reloc
1063 address and L1. That is the start of the reloc, and
1064 adding in the contents gives us the top. We must adjust
1065 both the r_offset field and the section contents. */
1067 start = irel->r_vaddr - sec->vma;
1068 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1072 && (stop <= addr || stop >= toaddr))
1073 irel->r_offset += count;
1074 else if (stop > addr
1076 && (start <= addr || start >= toaddr))
1077 irel->r_offset -= count;
1081 if (irel->r_type == R_SH_SWITCH16)
1082 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1083 else if (irel->r_type == R_SH_SWITCH8)
1084 voff = bfd_get_8 (abfd, contents + nraddr);
1086 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1087 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1092 start = irel->r_vaddr - sec->vma;
1093 stop = (bfd_vma) ((bfd_signed_vma) start
1094 + (long) irel->r_offset
1101 && (stop <= addr || stop >= toaddr))
1103 else if (stop > addr
1105 && (start <= addr || start >= toaddr))
1114 switch (irel->r_type)
1120 case R_SH_PCDISP8BY2:
1121 case R_SH_PCRELIMM8BY2:
1123 if ((oinsn & 0xff00) != (insn & 0xff00))
1125 bfd_put_16 (abfd, insn, contents + nraddr);
1130 if ((oinsn & 0xf000) != (insn & 0xf000))
1132 bfd_put_16 (abfd, insn, contents + nraddr);
1135 case R_SH_PCRELIMM8BY4:
1136 BFD_ASSERT (adjust == count || count >= 4);
1141 if ((irel->r_vaddr & 3) == 0)
1144 if ((oinsn & 0xff00) != (insn & 0xff00))
1146 bfd_put_16 (abfd, insn, contents + nraddr);
1151 if (voff < 0 || voff >= 0xff)
1153 bfd_put_8 (abfd, voff, contents + nraddr);
1158 if (voff < - 0x8000 || voff >= 0x8000)
1160 bfd_put_signed_16 (abfd, voff, contents + nraddr);
1165 bfd_put_signed_32 (abfd, voff, contents + nraddr);
1169 irel->r_offset += adjust;
1175 ((*_bfd_error_handler)
1176 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
1177 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
1178 bfd_set_error (bfd_error_bad_value);
1183 irel->r_vaddr = nraddr + sec->vma;
1186 /* Look through all the other sections. If they contain any IMM32
1187 relocs against internal symbols which we are not going to adjust
1188 below, we may need to adjust the addends. */
1189 for (o = abfd->sections; o != NULL; o = o->next)
1191 struct internal_reloc *internal_relocs;
1192 struct internal_reloc *irelscan, *irelscanend;
1193 bfd_byte *ocontents;
1196 || (o->flags & SEC_RELOC) == 0
1197 || o->reloc_count == 0)
1200 /* We always cache the relocs. Perhaps, if info->keep_memory is
1201 false, we should free them, if we are permitted to, when we
1202 leave sh_coff_relax_section. */
1203 internal_relocs = (_bfd_coff_read_internal_relocs
1204 (abfd, o, true, (bfd_byte *) NULL, false,
1205 (struct internal_reloc *) NULL));
1206 if (internal_relocs == NULL)
1210 irelscanend = internal_relocs + o->reloc_count;
1211 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1213 struct internal_syment sym;
1215 if (irelscan->r_type != R_SH_IMM32)
1218 bfd_coff_swap_sym_in (abfd,
1219 ((bfd_byte *) obj_coff_external_syms (abfd)
1220 + (irelscan->r_symndx
1221 * bfd_coff_symesz (abfd))),
1223 if (sym.n_sclass != C_EXT
1224 && sym.n_scnum == sec->target_index
1225 && ((bfd_vma) sym.n_value <= addr
1226 || (bfd_vma) sym.n_value >= toaddr))
1230 if (ocontents == NULL)
1232 if (coff_section_data (abfd, o)->contents != NULL)
1233 ocontents = coff_section_data (abfd, o)->contents;
1236 /* We always cache the section contents.
1237 Perhaps, if info->keep_memory is false, we
1238 should free them, if we are permitted to,
1239 when we leave sh_coff_relax_section. */
1240 ocontents = (bfd_byte *) bfd_malloc (o->_raw_size);
1241 if (ocontents == NULL)
1243 if (! bfd_get_section_contents (abfd, o, ocontents,
1247 coff_section_data (abfd, o)->contents = ocontents;
1251 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1253 if (val > addr && val < toaddr)
1254 bfd_put_32 (abfd, val - count,
1255 ocontents + irelscan->r_vaddr - o->vma);
1257 coff_section_data (abfd, o)->keep_contents = true;
1262 /* Adjusting the internal symbols will not work if something has
1263 already retrieved the generic symbols. It would be possible to
1264 make this work by adjusting the generic symbols at the same time.
1265 However, this case should not arise in normal usage. */
1266 if (obj_symbols (abfd) != NULL
1267 || obj_raw_syments (abfd) != NULL)
1269 ((*_bfd_error_handler)
1270 ("%s: fatal: generic symbols retrieved before relaxing",
1271 bfd_get_filename (abfd)));
1272 bfd_set_error (bfd_error_invalid_operation);
1276 /* Adjust all the symbols. */
1277 sym_hash = obj_coff_sym_hashes (abfd);
1278 symesz = bfd_coff_symesz (abfd);
1279 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1280 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1281 while (esym < esymend)
1283 struct internal_syment isym;
1285 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1287 if (isym.n_scnum == sec->target_index
1288 && (bfd_vma) isym.n_value > addr
1289 && (bfd_vma) isym.n_value < toaddr)
1291 isym.n_value -= count;
1293 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1295 if (*sym_hash != NULL)
1297 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1298 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1299 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1300 && (*sym_hash)->root.u.def.value < toaddr);
1301 (*sym_hash)->root.u.def.value -= count;
1305 esym += (isym.n_numaux + 1) * symesz;
1306 sym_hash += isym.n_numaux + 1;
1309 /* See if we can move the ALIGN reloc forward. We have adjusted
1310 r_vaddr for it already. */
1311 if (irelalign != NULL)
1313 bfd_vma alignto, alignaddr;
1315 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1316 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1317 1 << irelalign->r_offset);
1318 if (alignto != alignaddr)
1320 /* Tail recursion. */
1321 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1322 alignto - alignaddr);
1329 /* This is yet another version of the SH opcode table, used to rapidly
1330 get information about a particular instruction. */
1332 /* The opcode map is represented by an array of these structures. The
1333 array is indexed by the high order four bits in the instruction. */
1335 struct sh_major_opcode
1337 /* A pointer to the instruction list. This is an array which
1338 contains all the instructions with this major opcode. */
1339 const struct sh_minor_opcode *minor_opcodes;
1340 /* The number of elements in minor_opcodes. */
1341 unsigned short count;
1344 /* This structure holds information for a set of SH opcodes. The
1345 instruction code is anded with the mask value, and the resulting
1346 value is used to search the sorted opcode list. */
1348 struct sh_minor_opcode
1350 /* The sorted opcode list. */
1351 const struct sh_opcode *opcodes;
1352 /* The number of elements in opcodes. */
1353 unsigned short count;
1354 /* The mask value to use when searching the opcode list. */
1355 unsigned short mask;
1358 /* This structure holds information for an SH instruction. An array
1359 of these structures is sorted in order by opcode. */
1363 /* The code for this instruction, after it has been anded with the
1364 mask value in the sh_major_opcode structure. */
1365 unsigned short opcode;
1366 /* Flags for this instruction. */
1370 /* Flags which appear in the sh_opcode structure. */
1372 /* This instruction loads a value from memory. */
1375 /* This instruction stores a value to memory. */
1378 /* This instruction is a branch. */
1379 #define BRANCH (0x4)
1381 /* This instruction has a delay slot. */
1384 /* This instruction uses the value in the register in the field at
1385 mask 0x0f00 of the instruction. */
1386 #define USES1 (0x10)
1387 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1389 /* This instruction uses the value in the register in the field at
1390 mask 0x00f0 of the instruction. */
1391 #define USES2 (0x20)
1392 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1394 /* This instruction uses the value in register 0. */
1395 #define USESR0 (0x40)
1397 /* This instruction sets the value in the register in the field at
1398 mask 0x0f00 of the instruction. */
1399 #define SETS1 (0x80)
1400 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1402 /* This instruction sets the value in the register in the field at
1403 mask 0x00f0 of the instruction. */
1404 #define SETS2 (0x100)
1405 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1407 /* This instruction sets register 0. */
1408 #define SETSR0 (0x200)
1410 /* This instruction sets a special register. */
1411 #define SETSSP (0x400)
1413 /* This instruction uses a special register. */
1414 #define USESSP (0x800)
1416 /* This instruction uses the floating point register in the field at
1417 mask 0x0f00 of the instruction. */
1418 #define USESF1 (0x1000)
1419 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1421 /* This instruction uses the floating point register in the field at
1422 mask 0x00f0 of the instruction. */
1423 #define USESF2 (0x2000)
1424 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1426 /* This instruction uses floating point register 0. */
1427 #define USESF0 (0x4000)
1429 /* This instruction sets the floating point register in the field at
1430 mask 0x0f00 of the instruction. */
1431 #define SETSF1 (0x8000)
1432 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
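/* USESAS / SETSAS mean the instruction uses / sets the address
   register selected by bits 8-9 of the instruction; USESAS_REG maps
   that field onto registers r2-r5.  USESR8 means the instruction
   uses register r8 (see the movs.x @as+r8 forms in sh_dsp_opcodef0
   below).  */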
1434 #define USESAS (0x10000)
1435 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1436 #define USESR8 (0x20000)
1437 #define SETSAS (0x40000)
1438 #define SETSAS_REG(x) USESAS_REG (x)
1440 static boolean sh_insn_uses_reg
1441 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1442 static boolean sh_insn_sets_reg
1443 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1444 static boolean sh_insn_uses_or_sets_reg
1445 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1446 static boolean sh_insn_uses_freg
1447 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1448 static boolean sh_insn_sets_freg
1449 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1450 static boolean sh_insn_uses_or_sets_freg
1451 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1452 static boolean sh_insns_conflict
1453 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1454 const struct sh_opcode *));
1455 static boolean sh_load_use
1456 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1457 const struct sh_opcode *));
1459 /* The opcode maps. */
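/* MAP expands to the pair "array, element count" used to fill in the
   sh_minor_opcode and sh_major_opcode initializers below.  */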
1461 #define MAP(a) a, sizeof a / sizeof a[0]
1463 static const struct sh_opcode sh_opcode00[] =
1465 { 0x0008, SETSSP }, /* clrt */
1466 { 0x0009, 0 }, /* nop */
1467 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1468 { 0x0018, SETSSP }, /* sett */
1469 { 0x0019, SETSSP }, /* div0u */
1470 { 0x001b, 0 }, /* sleep */
1471 { 0x0028, SETSSP }, /* clrmac */
1472 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1473 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1474 { 0x0048, SETSSP }, /* clrs */
1475 { 0x0058, SETSSP } /* sets */
1478 static const struct sh_opcode sh_opcode01[] =
1480 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1481 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1482 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1483 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1484 { 0x0029, SETS1 | USESSP }, /* movt rn */
1485 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1486 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1487 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1488 { 0x0083, LOAD | USES1 }, /* pref @rn */
1489 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1490 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1491 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1492 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1493 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1496 /* These sixteen instructions can be handled with one table entry below. */
1498 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1499 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1500 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1501 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1502 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1503 { 0x0052, SETS1 | USESSP }, /* stc mod,rn */
1504 { 0x0062, SETS1 | USESSP }, /* stc rs,rn */
1505 { 0x0072, SETS1 | USESSP }, /* stc re,rn */
1506 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1507 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1508 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1509 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1510 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1511 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1512 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1513 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1516 static const struct sh_opcode sh_opcode02[] =
1518 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1519 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1520 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1521 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1522 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1523 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1524 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1525 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1526 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1529 static const struct sh_minor_opcode sh_opcode0[] =
1531 { MAP (sh_opcode00), 0xffff },
1532 { MAP (sh_opcode01), 0xf0ff },
1533 { MAP (sh_opcode02), 0xf00f }
1536 static const struct sh_opcode sh_opcode10[] =
1538 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1541 static const struct sh_minor_opcode sh_opcode1[] =
1543 { MAP (sh_opcode10), 0xf000 }
1546 static const struct sh_opcode sh_opcode20[] =
1548 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1549 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1550 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1551 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1552 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1553 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1554 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1555 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1556 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1557 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1558 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1559 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1560 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1561 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1562 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1565 static const struct sh_minor_opcode sh_opcode2[] =
1567 { MAP (sh_opcode20), 0xf00f }
1570 static const struct sh_opcode sh_opcode30[] =
1572 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1573 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1574 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1575 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1576 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1577 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1578 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1579 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1580 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1581 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1582 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1583 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1584 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1585 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1588 static const struct sh_minor_opcode sh_opcode3[] =
1590 { MAP (sh_opcode30), 0xf00f }
1593 static const struct sh_opcode sh_opcode40[] =
1595 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1596 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1597 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1598 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1599 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1600 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1601 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1602 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1603 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1604 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1605 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1606 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1607 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1608 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1609 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1610 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1611 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1612 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1613 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1614 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1615 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1616 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1617 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1618 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1619 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1620 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1621 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1622 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1623 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1624 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1625 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1626 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1627 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1628 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1629 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1630 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1631 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1632 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1633 { 0x407a, SETSSP | USES1 }, /* lds rm,a0 */
1634 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1635 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1636 { 0x408a, SETSSP | USES1 }, /* lds rm,x0 */
1637 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1638 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1639 { 0x409a, SETSSP | USES1 }, /* lds rm,x1 */
1640 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1641 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1642 { 0x40aa, SETSSP | USES1 }, /* lds rm,y0 */
1643 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1644 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1645 { 0x40ba, SETSSP | USES1 } /* lds rm,y1 */
1646 #if 0 /* These groups of sixteen insns can be
1647 handled with one table entry each below. */
1648 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1649 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1650 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1651 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1652 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1653 { 0x4053, STORE | SETS1 | USES1 | USESSP }, /* stc.l mod,@-rn */
1654 { 0x4063, STORE | SETS1 | USES1 | USESSP }, /* stc.l rs,@-rn */
1655 { 0x4073, STORE | SETS1 | USES1 | USESSP }, /* stc.l re,@-rn */
1656 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l r0_bank,@-rn */
1658 { 0x40f3, STORE | SETS1 | USES1 | USESSP }, /* stc.l r7_bank,@-rn */
1660 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1661 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1662 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1663 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1664 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1665 { 0x4057, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,mod */
1666 { 0x4067, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rs */
1667 { 0x4077, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,re */
1668 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r0_bank */
1670 { 0x40f7, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r7_bank */
1672 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1673 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1674 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1675 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1676 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1677 { 0x405e, SETSSP | USES1 }, /* ldc rm,mod */
1678 { 0x406e, SETSSP | USES1 }, /* ldc rm,rs */
1679 { 0x407e, SETSSP | USES1 } /* ldc rm,re */
1680 { 0x408e, SETSSP | USES1 } /* ldc rm,r0_bank */
1682 { 0x40fe, SETSSP | USES1 } /* ldc rm,r7_bank */
1686 static const struct sh_opcode sh_opcode41[] =
1688 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1689 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1690 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1691 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1692 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1693 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1696 static const struct sh_minor_opcode sh_opcode4[] =
1698 { MAP (sh_opcode40), 0xf0ff },
1699 { MAP (sh_opcode41), 0xf00f }
1702 static const struct sh_opcode sh_opcode50[] =
1704 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1707 static const struct sh_minor_opcode sh_opcode5[] =
1709 { MAP (sh_opcode50), 0xf000 }
1712 static const struct sh_opcode sh_opcode60[] =
1714 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1715 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1716 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1717 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1718 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1719 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1720 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1721 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1722 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1723 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1724 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1725 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1726 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1727 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1728 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1729 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1732 static const struct sh_minor_opcode sh_opcode6[] =
1734 { MAP (sh_opcode60), 0xf00f }
1737 static const struct sh_opcode sh_opcode70[] =
1739 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1742 static const struct sh_minor_opcode sh_opcode7[] =
1744 { MAP (sh_opcode70), 0xf000 }
1747 static const struct sh_opcode sh_opcode80[] =
1749 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1750 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1751 { 0x8200, SETSSP }, /* setrc #imm */
1752 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1753 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1754 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1755 { 0x8900, BRANCH | USESSP }, /* bt label */
1756 { 0x8b00, BRANCH | USESSP }, /* bf label */
1757 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1758 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1759 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1760 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1763 static const struct sh_minor_opcode sh_opcode8[] =
1765 { MAP (sh_opcode80), 0xff00 }
1768 static const struct sh_opcode sh_opcode90[] =
1770 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1773 static const struct sh_minor_opcode sh_opcode9[] =
1775 { MAP (sh_opcode90), 0xf000 }
1778 static const struct sh_opcode sh_opcodea0[] =
1780 { 0xa000, BRANCH | DELAY } /* bra label */
1783 static const struct sh_minor_opcode sh_opcodea[] =
1785 { MAP (sh_opcodea0), 0xf000 }
1788 static const struct sh_opcode sh_opcodeb0[] =
1790 { 0xb000, BRANCH | DELAY } /* bsr label */
1793 static const struct sh_minor_opcode sh_opcodeb[] =
1795 { MAP (sh_opcodeb0), 0xf000 }
1798 static const struct sh_opcode sh_opcodec0[] =
1800 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1801 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1802 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1803 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1804 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1805 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1806 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1807 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1808 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1809 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1810 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1811 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1812 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1813 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1814 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1815 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1818 static const struct sh_minor_opcode sh_opcodec[] =
1820 { MAP (sh_opcodec0), 0xff00 }
1823 static const struct sh_opcode sh_opcoded0[] =
1825 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1828 static const struct sh_minor_opcode sh_opcoded[] =
1830 { MAP (sh_opcoded0), 0xf000 }
1833 static const struct sh_opcode sh_opcodee0[] =
1835 { 0xe000, SETS1 } /* mov #imm,rn */
1838 static const struct sh_minor_opcode sh_opcodee[] =
1840 { MAP (sh_opcodee0), 0xf000 }
1843 static const struct sh_opcode sh_opcodef0[] =
1845 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
1846 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
1847 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
1848 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
1849 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
1850 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
1851 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
1852 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
1853 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
1854 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
1855 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
1856 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
1857 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
1858 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
1861 static const struct sh_opcode sh_opcodef1[] =
1863 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
1864 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
1865 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
1866 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
1867 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
1868 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
1869 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
1870 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
1871 { 0xf08d, SETSF1 }, /* fldi0 fn */
1872 { 0xf09d, SETSF1 } /* fldi1 fn */
1875 static const struct sh_minor_opcode sh_opcodef[] =
1877 { MAP (sh_opcodef0), 0xf00f },
1878 { MAP (sh_opcodef1), 0xf0ff }
1881 static struct sh_major_opcode sh_opcodes[] =
1883 { MAP (sh_opcode0) },
1884 { MAP (sh_opcode1) },
1885 { MAP (sh_opcode2) },
1886 { MAP (sh_opcode3) },
1887 { MAP (sh_opcode4) },
1888 { MAP (sh_opcode5) },
1889 { MAP (sh_opcode6) },
1890 { MAP (sh_opcode7) },
1891 { MAP (sh_opcode8) },
1892 { MAP (sh_opcode9) },
1893 { MAP (sh_opcodea) },
1894 { MAP (sh_opcodeb) },
1895 { MAP (sh_opcodec) },
1896 { MAP (sh_opcoded) },
1897 { MAP (sh_opcodee) },
1898 { MAP (sh_opcodef) }
1901 /* The double data transfer / parallel processing insns are not
1902 described here. This will cause sh_align_load_span to leave them alone. */
1904 static const struct sh_opcode sh_dsp_opcodef0[] =
1906 { 0xf400, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @-as,ds */
1907 { 0xf401, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@-as */
1908 { 0xf404, USESAS | LOAD | SETSSP }, /* movs.x @as,ds */
1909 { 0xf405, USESAS | STORE | USESSP }, /* movs.x ds,@as */
1910 { 0xf408, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @as+,ds */
1911 { 0xf409, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@as+ */
1912 { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
1913 { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
1916 static const struct sh_minor_opcode sh_dsp_opcodef[] =
1918 { MAP (sh_dsp_opcodef0), 0xfc0d }
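/* Worked example (added annotation): for insn 0x012a (sts pr,r1) the
   major index is (0x012a & 0xf000) >> 12 == 0, so sh_opcode0 is
   searched; masking with 0xf0ff gives 0x002a, which matches the
   "sts pr,rn" entry in sh_opcode01 with flags SETS1 | USESSP, and
   SETS1_REG (0x012a) == 1.  */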
1921 /* Given an instruction, return a pointer to the corresponding
1922 sh_opcode structure. Return NULL if the instruction is not
1923 recognized. */
1925 static const struct sh_opcode *
1929 const struct sh_major_opcode *maj;
1930 const struct sh_minor_opcode *min, *minend;
1932 maj = &sh_opcodes[(insn & 0xf000) >> 12];
1933 min = maj->minor_opcodes;
1934 minend = min + maj->count;
1935 for (; min < minend; min++)
1938 const struct sh_opcode *op, *opend;
1940 l = insn & min->mask;
1942 opend = op + min->count;
1944 /* Since the opcodes tables are sorted, we could use a binary
1945 search here if the count were above some cutoff value. */
1946 for (; op < opend; op++)
1947 if (op->opcode == l)
1954 /* See whether an instruction uses or sets a general purpose register. */
1957 sh_insn_uses_or_sets_reg (insn, op, reg)
1959 const struct sh_opcode *op;
1962 if (sh_insn_uses_reg (insn, op, reg))
1965 return sh_insn_sets_reg (insn, op, reg);
1968 /* See whether an instruction uses a general purpose register. */
1971 sh_insn_uses_reg (insn, op, reg)
1973 const struct sh_opcode *op;
1980 if ((f & USES1) != 0
1981 && USES1_REG (insn) == reg)
1983 if ((f & USES2) != 0
1984 && USES2_REG (insn) == reg)
1986 if ((f & USESR0) != 0
1989 if ((f & USESAS) && reg == USESAS_REG (insn))
1991 if ((f & USESR8) && reg == 8)
1996 /* See whether an instruction sets a general purpose register. */
1999 sh_insn_sets_reg (insn, op, reg)
2001 const struct sh_opcode *op;
2008 if ((f & SETS1) != 0
2009 && SETS1_REG (insn) == reg)
2011 if ((f & SETS2) != 0
2012 && SETS2_REG (insn) == reg)
2014 if ((f & SETSR0) != 0
2017 if ((f & SETSAS) && reg == SETSAS_REG (insn))
2023 /* See whether an instruction uses or sets a floating point register. */
2026 sh_insn_uses_or_sets_freg (insn, op, reg)
2028 const struct sh_opcode *op;
2031 if (sh_insn_uses_freg (insn, op, reg))
2034 return sh_insn_sets_freg (insn, op, reg);
2037 /* See whether an instruction uses a floating point register. */
2040 sh_insn_uses_freg (insn, op, freg)
2042 const struct sh_opcode *op;
2049 /* We can't tell whether this is a double-precision insn, so play it
2050 safe and assume that it might be. That means we must not only test
2051 FREG against itself, but also an even FREG against FREG+1 (in case
2052 the using insn touches only the low half of a double-precision
2053 value) and an odd FREG against FREG-1 (in case the setting insn
2054 sets only the low half of a double-precision value).
2055 What this all boils down to is that we have to ignore the lowest
2056 bit of the register number. */
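
/* Concretely (as an illustration of the masking below): with bit 0
   ignored, register numbers 2 and 3 both compare as 2, so a
   single-precision reference to FR3 is treated as overlapping the
   double-precision pair that also holds FR2.  */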
2058 if ((f & USESF1) != 0
2059 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2061 if ((f & USESF2) != 0
2062 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2064 if ((f & USESF0) != 0
2071 /* See whether an instruction sets a floating point register. */
2074 sh_insn_sets_freg (insn, op, freg)
2076 const struct sh_opcode *op;
2083 /* We can't tell whether this is a double-precision insn, so play it
2084 safe and assume that it might be. That means we must not only test
2085 FREG against itself, but also an even FREG against FREG+1 (in case
2086 the using insn touches only the low half of a double-precision
2087 value) and an odd FREG against FREG-1 (in case the setting insn
2088 sets only the low half of a double-precision value).
2089 What this all boils down to is that we have to ignore the lowest
2090 bit of the register number. */
2092 if ((f & SETSF1) != 0
2093 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2099 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2100 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2101 This should return true if there is a conflict, or false if the
2102 instructions can be swapped safely. */
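
/* For instance (illustrative only): if I1 is "add #1,r4", which sets r4,
   and I2 is "mov.l @r4,r5", which uses r4, the two conflict and must not
   be swapped.  If neither insn were a branch or in a delay slot and they
   touched disjoint registers, a swap would be safe.  */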
2105 sh_insns_conflict (i1, op1, i2, op2)
2107 const struct sh_opcode *op1;
2109 const struct sh_opcode *op2;
2111 unsigned int f1, f2;
2116 /* Load of fpscr conflicts with floating point operations.
2117 FIXME: shouldn't test raw opcodes here. */
2118 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2119 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2122 if ((f1 & (BRANCH | DELAY)) != 0
2123 || (f2 & (BRANCH | DELAY)) != 0)
2126 if (((f1 | f2) & SETSSP)
2127 && (f1 & (SETSSP | USESSP))
2128 && (f2 & (SETSSP | USESSP)))
2131 if ((f1 & SETS1) != 0
2132 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2134 if ((f1 & SETS2) != 0
2135 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2137 if ((f1 & SETSR0) != 0
2138 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2141 && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2143 if ((f1 & SETSF1) != 0
2144 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2147 if ((f2 & SETS1) != 0
2148 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2150 if ((f2 & SETS2) != 0
2151 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2153 if ((f2 & SETSR0) != 0
2154 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2157 && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2159 if ((f2 & SETSF1) != 0
2160 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2163 /* The instructions do not conflict. */
2167 /* I1 is a load instruction, and I2 is some other instruction. Return
2168 true if I1 loads a register which I2 uses. */
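
/* Example (illustrative only): if I1 is "mov.l @r4,r5" and I2 is
   "add r5,r6", then I1 loads r5 and I2 uses it, so this returns true;
   placing I2 directly after I1 would create a load-use stall.  */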
2171 sh_load_use (i1, op1, i2, op2)
2173 const struct sh_opcode *op1;
2175 const struct sh_opcode *op2;
2181 if ((f1 & LOAD) == 0)
2184 /* If both SETS1 and SETSSP are set, that means a load to a special
2185 register using postincrement addressing mode, which we don't care about. */
2187 if ((f1 & SETS1) != 0
2188 && (f1 & SETSSP) == 0
2189 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2192 if ((f1 & SETSR0) != 0
2193 && sh_insn_uses_reg (i2, op2, 0))
2196 if ((f1 & SETSF1) != 0
2197 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2203 /* Try to align loads and stores within a span of memory. This is
2204 called by both the ELF and the COFF sh targets. ABFD and SEC are
2205 the BFD and section we are examining. CONTENTS is the contents of
2206 the section. SWAP is the routine to call to swap two instructions.
2207 RELOCS is a pointer to the internal relocation information, to be
2208 passed to SWAP. PLABEL is a pointer to the current label in a
2209 sorted list of labels; LABEL_END is the end of the list. START and
2210 STOP are the range of memory to examine. If a swap is made,
2211 *PSWAPPED is set to true. */
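
/* A concrete picture of a successful swap (sketch only): if a load sits
   at the unaligned offset 0x102, and the instruction at 0x100 carries no
   label, is not itself a load/store, and does not conflict with it, the
   two are exchanged via SWAP so the load lands on the four byte boundary
   at 0x100.  */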
2214 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2215 plabel, label_end, start, stop, pswapped)
2219 boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2227 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2228 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2231 /* The SH4 has a Harvard architecture, hence aligning loads is not
2232 desirable. In fact, it is counter-productive, since it interferes
2233 with the schedules generated by the compiler. */
2234 if (abfd->arch_info->mach == bfd_mach_sh4)
2237 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP instructions. */
2241 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2242 sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2245 /* Instructions should be aligned on 2 byte boundaries. */
2246 if ((start & 1) == 1)
2249 /* Now look through the unaligned addresses. */
2253 for (; i < stop; i += 4)
2256 const struct sh_opcode *op;
2257 unsigned int prev_insn = 0;
2258 const struct sh_opcode *prev_op = NULL;
2260 insn = bfd_get_16 (abfd, contents + i);
2261 op = sh_insn_info (insn);
2263 || (op->flags & (LOAD | STORE)) == 0)
2266 /* This is a load or store which is not on a four byte boundary. */
2268 while (*plabel < label_end && **plabel < i)
2273 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2274 /* If INSN is the field b of a parallel processing insn, it is not
2275 a load / store after all. Note that the test here might mistake
2276 the field_b of a pcopy insn for the starting code of a parallel
2277 processing insn; this might miss a swapping opportunity, but at
2278 least we're on the safe side. */
2279 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2282 /* Check if prev_insn is actually the field b of a parallel
2283 processing insn. Again, this can give a spurious match after a pcopy. */
2285 if (dsp && i - 2 > start)
2287 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2289 if ((pprev_insn & 0xfc00) == 0xf800)
2292 prev_op = sh_insn_info (prev_insn);
2295 prev_op = sh_insn_info (prev_insn);
2297 /* If the load/store instruction is in a delay slot, we cannot swap. */
2300 || (prev_op->flags & DELAY) != 0)
2304 && (*plabel >= label_end || **plabel != i)
2306 && (prev_op->flags & (LOAD | STORE)) == 0
2307 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2311 /* The load/store instruction does not have a label, and
2312 there is a previous instruction; PREV_INSN is not
2313 itself a load/store instruction, and PREV_INSN and
2314 INSN do not conflict. */
2320 unsigned int prev2_insn;
2321 const struct sh_opcode *prev2_op;
2323 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2324 prev2_op = sh_insn_info (prev2_insn);
2326 /* If the instruction before PREV_INSN has a delay
2327 slot--that is, PREV_INSN is in a delay slot--we cannot swap. */
2329 if (prev2_op == NULL
2330 || (prev2_op->flags & DELAY) != 0)
2333 /* If the instruction before PREV_INSN is a load,
2334 and it sets a register which INSN uses, then
2335 putting INSN immediately after PREV_INSN will
2336 cause a pipeline bubble, so there is no point in making the swap. */
2339 && (prev2_op->flags & LOAD) != 0
2340 && sh_load_use (prev2_insn, prev2_op, insn, op))
2346 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2353 while (*plabel < label_end && **plabel < i + 2)
2357 && (*plabel >= label_end || **plabel != i + 2))
2359 unsigned int next_insn;
2360 const struct sh_opcode *next_op;
2362 /* There is an instruction after the load/store
2363 instruction, and it does not have a label. */
2364 next_insn = bfd_get_16 (abfd, contents + i + 2);
2365 next_op = sh_insn_info (next_insn);
2367 && (next_op->flags & (LOAD | STORE)) == 0
2368 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2372 /* NEXT_INSN is not itself a load/store instruction,
2373 and it does not conflict with INSN. */
2377 /* If PREV_INSN is a load, and it sets a register
2378 which NEXT_INSN uses, then putting NEXT_INSN
2379 immediately after PREV_INSN will cause a pipeline
2380 bubble, so there is no reason to make this swap. */
2382 && (prev_op->flags & LOAD) != 0
2383 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2386 /* If INSN is a load, and it sets a register which
2387 the insn after NEXT_INSN uses, then doing the
2388 swap will cause a pipeline bubble, so there is no
2389 reason to make the swap. However, if the insn
2390 after NEXT_INSN is itself a load or store
2391 instruction, then it is misaligned, so
2392 optimistically hope that it will be swapped
2393 itself, and just live with the pipeline bubble if it isn't. */
2397 && (op->flags & LOAD) != 0)
2399 unsigned int next2_insn;
2400 const struct sh_opcode *next2_op;
2402 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2403 next2_op = sh_insn_info (next2_insn);
2404 if ((next2_op->flags & (LOAD | STORE)) == 0
2405 && sh_load_use (insn, op, next2_insn, next2_op))
2411 if (! (*swap) (abfd, sec, relocs, contents, i))
2423 /* Look for loads and stores which we can align to four byte
2424 boundaries. See the longer comment above sh_relax_section for why
2425 this is desirable. This sets *PSWAPPED if some instruction was swapped. */
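
/* The loop below works span by span (noted here as a sketch): each
   R_SH_CODE reloc opens a span that extends to the next R_SH_DATA reloc,
   or to the end of the section if there is none, and only instructions
   inside such spans are candidates for swapping.  */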
2429 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2432 struct internal_reloc *internal_relocs;
2436 struct internal_reloc *irel, *irelend;
2437 bfd_vma *labels = NULL;
2438 bfd_vma *label, *label_end;
2442 irelend = internal_relocs + sec->reloc_count;
2444 /* Get all the addresses with labels on them. */
2445 labels = (bfd_vma *) bfd_malloc (sec->reloc_count * sizeof (bfd_vma));
2449 for (irel = internal_relocs; irel < irelend; irel++)
2451 if (irel->r_type == R_SH_LABEL)
2453 *label_end = irel->r_vaddr - sec->vma;
2458 /* Note that the assembler currently always outputs relocs in
2459 address order. If that ever changes, this code will need to sort
2460 the label values and the relocs. */
2464 for (irel = internal_relocs; irel < irelend; irel++)
2466 bfd_vma start, stop;
2468 if (irel->r_type != R_SH_CODE)
2471 start = irel->r_vaddr - sec->vma;
2473 for (irel++; irel < irelend; irel++)
2474 if (irel->r_type == R_SH_DATA)
2477 stop = irel->r_vaddr - sec->vma;
2479 stop = sec->_cooked_size;
2481 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2482 (PTR) internal_relocs, &label,
2483 label_end, start, stop, pswapped))
2497 /* Swap two SH instructions. */
2500 sh_swap_insns (abfd, sec, relocs, contents, addr)
2507 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2508 unsigned short i1, i2;
2509 struct internal_reloc *irel, *irelend;
2511 /* Swap the instructions themselves. */
2512 i1 = bfd_get_16 (abfd, contents + addr);
2513 i2 = bfd_get_16 (abfd, contents + addr + 2);
2514 bfd_put_16 (abfd, i2, contents + addr);
2515 bfd_put_16 (abfd, i1, contents + addr + 2);
2517 /* Adjust all reloc addresses. */
2518 irelend = internal_relocs + sec->reloc_count;
2519 for (irel = internal_relocs; irel < irelend; irel++)
2523 /* There are a few special types of relocs that we don't want to
2524 adjust. These relocs do not apply to the instruction itself,
2525 but are only associated with the address. */
2526 type = irel->r_type;
2527 if (type == R_SH_ALIGN
2528 || type == R_SH_CODE
2529 || type == R_SH_DATA
2530 || type == R_SH_LABEL)
2533 /* If an R_SH_USES reloc points to one of the addresses being
2534 swapped, we must adjust it. It would be incorrect to do this
2535 for a jump, though, since we want to execute both
2536 instructions after the jump. (We have avoided swapping
2537 around a label, so the jump will not wind up executing an
2538 instruction it shouldn't). */
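
/* Worked example (illustrative, with addresses relative to the section):
   a reloc at 0x100 with r_offset 8 designates the insn at
   0x100 + 4 + 8 == 0x10c.  If the swap moved that insn from
   ADDR == 0x10c to 0x10e, r_offset is bumped to 10 so the reloc still
   designates the same insn.  */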
2539 if (type == R_SH_USES)
2543 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2545 irel->r_offset += 2;
2546 else if (off == addr + 2)
2547 irel->r_offset -= 2;
2550 if (irel->r_vaddr - sec->vma == addr)
2555 else if (irel->r_vaddr - sec->vma == addr + 2)
2566 unsigned short insn, oinsn;
2569 loc = contents + irel->r_vaddr - sec->vma;
2576 case R_SH_PCDISP8BY2:
2577 case R_SH_PCRELIMM8BY2:
2578 insn = bfd_get_16 (abfd, loc);
2581 if ((oinsn & 0xff00) != (insn & 0xff00))
2583 bfd_put_16 (abfd, insn, loc);
2587 insn = bfd_get_16 (abfd, loc);
2590 if ((oinsn & 0xf000) != (insn & 0xf000))
2592 bfd_put_16 (abfd, insn, loc);
2595 case R_SH_PCRELIMM8BY4:
2596 /* This reloc ignores the least significant two bits of
2597 the program counter before adding in the offset.
2598 This means that if ADDR is on a four byte boundary, the
2599 swap will not affect the offset. If it is not, then the
2600 instruction crosses a four byte boundary and the offset
2601 must be adjusted. */
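
/* Worked example (illustrative): the PC used is the insn address plus 4,
   rounded down to a multiple of 4.  An insn moved from 0x100 to 0x102
   keeps the same base (0x104 either way), while one moved between 0x102
   and 0x104 sees its base change by 4, so its displacement, which is
   scaled by 4, must change by one.  */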
2602 if ((addr & 3) != 0)
2604 insn = bfd_get_16 (abfd, loc);
2607 if ((oinsn & 0xff00) != (insn & 0xff00))
2609 bfd_put_16 (abfd, insn, loc);
2617 ((*_bfd_error_handler)
2618 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
2619 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
2620 bfd_set_error (bfd_error_bad_value);
2629 /* This is a modification of _bfd_coff_generic_relocate_section, which
2630 will handle SH relaxing. */
2633 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2634 relocs, syms, sections)
2635 bfd *output_bfd ATTRIBUTE_UNUSED;
2636 struct bfd_link_info *info;
2638 asection *input_section;
2640 struct internal_reloc *relocs;
2641 struct internal_syment *syms;
2642 asection **sections;
2644 struct internal_reloc *rel;
2645 struct internal_reloc *relend;
2648 relend = rel + input_section->reloc_count;
2649 for (; rel < relend; rel++)
2652 struct coff_link_hash_entry *h;
2653 struct internal_syment *sym;
2656 reloc_howto_type *howto;
2657 bfd_reloc_status_type rstat;
2659 /* Almost all relocs have to do with relaxing. If any work must
2660 be done for them, it has been done in sh_relax_section. */
2661 if (rel->r_type != R_SH_IMM32
2662 && rel->r_type != R_SH_PCDISP)
2665 symndx = rel->r_symndx;
2675 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2677 (*_bfd_error_handler)
2678 ("%s: illegal symbol index %ld in relocs",
2679 bfd_get_filename (input_bfd), symndx);
2680 bfd_set_error (bfd_error_bad_value);
2683 h = obj_coff_sym_hashes (input_bfd)[symndx];
2684 sym = syms + symndx;
2687 if (sym != NULL && sym->n_scnum != 0)
2688 addend = - sym->n_value;
2692 if (rel->r_type == R_SH_PCDISP)
2695 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2698 howto = &sh_coff_howtos[rel->r_type];
2702 bfd_set_error (bfd_error_bad_value);
2712 /* There is nothing to do for an internal PCDISP reloc. */
2713 if (rel->r_type == R_SH_PCDISP)
2718 sec = bfd_abs_section_ptr;
2723 sec = sections[symndx];
2724 val = (sec->output_section->vma
2725 + sec->output_offset
2732 if (h->root.type == bfd_link_hash_defined
2733 || h->root.type == bfd_link_hash_defweak)
2737 sec = h->root.u.def.section;
2738 val = (h->root.u.def.value
2739 + sec->output_section->vma
2740 + sec->output_offset);
2742 else if (! info->relocateable)
2744 if (! ((*info->callbacks->undefined_symbol)
2745 (info, h->root.root.string, input_bfd, input_section,
2746 rel->r_vaddr - input_section->vma)))
2751 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2753 rel->r_vaddr - input_section->vma,
2762 case bfd_reloc_overflow:
2765 char buf[SYMNMLEN + 1];
2770 name = h->root.root.string;
2771 else if (sym->_n._n_n._n_zeroes == 0
2772 && sym->_n._n_n._n_offset != 0)
2773 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2776 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2777 buf[SYMNMLEN] = '\0';
2781 if (! ((*info->callbacks->reloc_overflow)
2782 (info, name, howto->name, (bfd_vma) 0, input_bfd,
2783 input_section, rel->r_vaddr - input_section->vma)))
2792 /* This is a version of bfd_generic_get_relocated_section_contents
2793 which uses sh_relocate_section. */
2796 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2797 data, relocateable, symbols)
2799 struct bfd_link_info *link_info;
2800 struct bfd_link_order *link_order;
2802 boolean relocateable;
2805 asection *input_section = link_order->u.indirect.section;
2806 bfd *input_bfd = input_section->owner;
2807 asection **sections = NULL;
2808 struct internal_reloc *internal_relocs = NULL;
2809 struct internal_syment *internal_syms = NULL;
2811 /* We only need to handle the case of relaxing, or of having a
2812 particular set of section contents, specially. */
2814 || coff_section_data (input_bfd, input_section) == NULL
2815 || coff_section_data (input_bfd, input_section)->contents == NULL)
2816 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2821 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2822 input_section->_raw_size);
2824 if ((input_section->flags & SEC_RELOC) != 0
2825 && input_section->reloc_count > 0)
2827 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2828 bfd_byte *esym, *esymend;
2829 struct internal_syment *isymp;
2832 if (! _bfd_coff_get_external_symbols (input_bfd))
2835 internal_relocs = (_bfd_coff_read_internal_relocs
2836 (input_bfd, input_section, false, (bfd_byte *) NULL,
2837 false, (struct internal_reloc *) NULL));
2838 if (internal_relocs == NULL)
2841 internal_syms = ((struct internal_syment *)
2842 bfd_malloc (obj_raw_syment_count (input_bfd)
2843 * sizeof (struct internal_syment)));
2844 if (internal_syms == NULL)
2847 sections = (asection **) bfd_malloc (obj_raw_syment_count (input_bfd)
2848 * sizeof (asection *));
2849 if (sections == NULL)
2852 isymp = internal_syms;
2854 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2855 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2856 while (esym < esymend)
2858 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
2860 if (isymp->n_scnum != 0)
2861 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2864 if (isymp->n_value == 0)
2865 *secpp = bfd_und_section_ptr;
2867 *secpp = bfd_com_section_ptr;
2870 esym += (isymp->n_numaux + 1) * symesz;
2871 secpp += isymp->n_numaux + 1;
2872 isymp += isymp->n_numaux + 1;
2875 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2876 input_section, data, internal_relocs,
2877 internal_syms, sections))
2882 free (internal_syms);
2883 internal_syms = NULL;
2884 free (internal_relocs);
2885 internal_relocs = NULL;
2891 if (internal_relocs != NULL)
2892 free (internal_relocs);
2893 if (internal_syms != NULL)
2894 free (internal_syms);
2895 if (sections != NULL)
2900 /* The target vectors. */
2902 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL)
2904 #ifdef TARGET_SHL_SYM
2905 #define TARGET_SYM TARGET_SHL_SYM
2907 #define TARGET_SYM shlcoff_vec
2910 #ifndef TARGET_SHL_NAME
2911 #define TARGET_SHL_NAME "coff-shl"
2914 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE, 0, '_', NULL)
2917 /* Some people want versions of the SH COFF target which do not align
2918 to 16 byte boundaries. We implement that by adding a couple of new
2919 target vectors. These are just like the ones above, but they
2920 change the default section alignment. To generate them in the
2921 assembler, use -small. To use them in the linker, use -b
2922 coff-sh{l}-small and -oformat coff-sh{l}-small.
2924 Yes, this is a horrible hack. A general solution for setting
2925 section alignment in COFF is rather complex. ELF handles this correctly. */
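
/* For example (a sketch; the tool names are generic): assemble with
   "as -small foo.s" and link with
   "ld -b coff-sh-small -oformat coff-sh-small", or use the
   coff-shl-small spelling for little endian output.  */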
2928 /* Only recognize the small versions if the target was not defaulted.
2929 Otherwise we won't recognize the non-default endianness. */
2931 static const bfd_target *
2932 coff_small_object_p (abfd)
2935 if (abfd->target_defaulted)
2937 bfd_set_error (bfd_error_wrong_format);
2940 return coff_object_p (abfd);
2943 /* Set the section alignment for the small versions. */
2946 coff_small_new_section_hook (abfd, section)
2950 if (! coff_new_section_hook (abfd, section))
2953 /* We must align to at least a four byte boundary, because longword
2954 accesses must be on a four byte boundary. */
2955 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
2956 section->alignment_power = 2;
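
/* That is, the small vectors drop the default 2**4 (16 byte) section
   alignment to 2**2 (4 bytes), the minimum required for longword
   access.  */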
2961 /* This is copied from bfd_coff_std_swap_table so that we can change
2962 the default section alignment power. */
2964 static const bfd_coff_backend_data bfd_coff_small_swap_table =
2966 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
2967 coff_swap_aux_out, coff_swap_sym_out,
2968 coff_swap_lineno_out, coff_swap_reloc_out,
2969 coff_swap_filehdr_out, coff_swap_aouthdr_out,
2970 coff_swap_scnhdr_out,
2971 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
2972 #ifdef COFF_LONG_FILENAMES
2977 #ifdef COFF_LONG_SECTION_NAMES
2983 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
2984 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
2985 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
2986 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
2987 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
2988 coff_classify_symbol, coff_compute_section_file_positions,
2989 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
2990 coff_adjust_symndx, coff_link_add_one_symbol,
2991 coff_link_output_has_begun, coff_final_link_postscript
2994 #define coff_small_close_and_cleanup \
2995 coff_close_and_cleanup
2996 #define coff_small_bfd_free_cached_info \
2997 coff_bfd_free_cached_info
2998 #define coff_small_get_section_contents \
2999 coff_get_section_contents
3000 #define coff_small_get_section_contents_in_window \
3001 coff_get_section_contents_in_window
3003 extern const bfd_target shlcoff_small_vec;
3005 const bfd_target shcoff_small_vec =
3007 "coff-sh-small", /* name */
3008 bfd_target_coff_flavour,
3009 BFD_ENDIAN_BIG, /* data byte order is big */
3010 BFD_ENDIAN_BIG, /* header byte order is big */
3012 (HAS_RELOC | EXEC_P | /* object flags */
3013 HAS_LINENO | HAS_DEBUG |
3014 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3016 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3017 '_', /* leading symbol underscore */
3018 '/', /* ar_pad_char */
3019 15, /* ar_max_namelen */
3020 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3021 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3022 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3023 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3024 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3025 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3027 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3028 bfd_generic_archive_p, _bfd_dummy_target},
3029 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3031 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3032 _bfd_write_archive_contents, bfd_false},
3034 BFD_JUMP_TABLE_GENERIC (coff_small),
3035 BFD_JUMP_TABLE_COPY (coff),
3036 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3037 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3038 BFD_JUMP_TABLE_SYMBOLS (coff),
3039 BFD_JUMP_TABLE_RELOCS (coff),
3040 BFD_JUMP_TABLE_WRITE (coff),
3041 BFD_JUMP_TABLE_LINK (coff),
3042 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3044 & shlcoff_small_vec,
3046 (PTR) &bfd_coff_small_swap_table
3049 const bfd_target shlcoff_small_vec =
3051 "coff-shl-small", /* name */
3052 bfd_target_coff_flavour,
3053 BFD_ENDIAN_LITTLE, /* data byte order is little */
3054 BFD_ENDIAN_LITTLE, /* header byte order is little endian too */
3056 (HAS_RELOC | EXEC_P | /* object flags */
3057 HAS_LINENO | HAS_DEBUG |
3058 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3060 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3061 '_', /* leading symbol underscore */
3062 '/', /* ar_pad_char */
3063 15, /* ar_max_namelen */
3064 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3065 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3066 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3067 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3068 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3069 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3071 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3072 bfd_generic_archive_p, _bfd_dummy_target},
3073 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3075 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3076 _bfd_write_archive_contents, bfd_false},
3078 BFD_JUMP_TABLE_GENERIC (coff_small),
3079 BFD_JUMP_TABLE_COPY (coff),
3080 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3081 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3082 BFD_JUMP_TABLE_SYMBOLS (coff),
3083 BFD_JUMP_TABLE_RELOCS (coff),
3084 BFD_JUMP_TABLE_WRITE (coff),
3085 BFD_JUMP_TABLE_LINK (coff),
3086 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3090 (PTR) &bfd_coff_small_swap_table