1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
6 This file is part of GAS, the GNU Assembler.
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
35 - labels are wrong if automatic alignment is introduced
36 (e.g., check out the second real10 definition in test-data.s)
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
46 #include "safe-ctype.h"
47 #include "dwarf2dbg.h"
50 #include "opcode/ia64.h"
54 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
55 #define MIN(a,b) ((a) < (b) ? (a) : (b))
58 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
59 #define CURR_SLOT md.slot[md.curr_slot]
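/* Illustrative example (assuming NUM_SLOTS is 4, per the bundle comment
   further down): the slots form a small circular buffer, so with
   md.curr_slot == 0, PREV_SLOT expands to md.slot[(0 + 4 - 1) % 4],
   i.e. md.slot[3], while CURR_SLOT is md.slot[0].  */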
61 #define O_pseudo_fixup (O_max + 1)
65 /* IA-64 ABI section pseudo-ops. */
66 SPECIAL_SECTION_BSS = 0,
68 SPECIAL_SECTION_SDATA,
69 SPECIAL_SECTION_RODATA,
70 SPECIAL_SECTION_COMMENT,
71 SPECIAL_SECTION_UNWIND,
72 SPECIAL_SECTION_UNWIND_INFO,
73 /* HPUX specific section pseudo-ops. */
74 SPECIAL_SECTION_INIT_ARRAY,
75 SPECIAL_SECTION_FINI_ARRAY,
92 FUNC_LT_FPTR_RELATIVE,
102 REG_FR = (REG_GR + 128),
103 REG_AR = (REG_FR + 128),
104 REG_CR = (REG_AR + 128),
105 REG_P = (REG_CR + 128),
106 REG_BR = (REG_P + 64),
107 REG_IP = (REG_BR + 8),
114 /* The following are pseudo-registers for use by gas only. */
126 /* The following pseudo-registers are used for unwind directives only: */
134 DYNREG_GR = 0, /* dynamic general purpose register */
135 DYNREG_FR, /* dynamic floating point register */
136 DYNREG_PR, /* dynamic predicate register */
140 enum operand_match_result
143 OPERAND_OUT_OF_RANGE,
147 /* On the ia64, we can't know the address of a text label until the
148 instructions are packed into a bundle. To handle this, we keep
149 track of the list of labels that appear in front of each instruction. */
153 struct label_fix *next;
157 /* This is the endianness of the current section. */
158 extern int target_big_endian;
160 /* This is the default endianness. */
161 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
163 void (*ia64_number_to_chars) PARAMS ((char *, valueT, int));
165 static void ia64_float_to_chars_bigendian
166 PARAMS ((char *, LITTLENUM_TYPE *, int));
167 static void ia64_float_to_chars_littleendian
168 PARAMS ((char *, LITTLENUM_TYPE *, int));
169 static void (*ia64_float_to_chars)
170 PARAMS ((char *, LITTLENUM_TYPE *, int));
172 static struct hash_control *alias_hash;
173 static struct hash_control *alias_name_hash;
174 static struct hash_control *secalias_hash;
175 static struct hash_control *secalias_name_hash;
177 /* Characters which always start a comment. */
178 const char comment_chars[] = "";
180 /* Characters which start a comment at the beginning of a line. */
181 const char line_comment_chars[] = "#";
183 /* Characters which may be used to separate multiple commands on a single line. */
185 const char line_separator_chars[] = ";";
187 /* Characters which are used to indicate an exponent in a floating point number. */
189 const char EXP_CHARS[] = "eE";
191 /* Characters which mean that a number is a floating point constant, as in 0d1.0. */
193 const char FLT_CHARS[] = "rRsSfFdDxXpP";
195 /* ia64-specific option processing: */
197 const char *md_shortopts = "m:N:x::";
199 struct option md_longopts[] =
201 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
202 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
203 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
204 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
207 size_t md_longopts_size = sizeof (md_longopts);
211 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
212 struct hash_control *reg_hash; /* register name hash table */
213 struct hash_control *dynreg_hash; /* dynamic register hash table */
214 struct hash_control *const_hash; /* constant hash table */
215 struct hash_control *entry_hash; /* code entry hint hash table */
217 symbolS *regsym[REG_NUM];
219 /* If X_op is != O_absent, the register name for the instruction's
220 qualifying predicate. If absent, p0 is assumed for instructions
221 that can be predicated. */
228 explicit_mode : 1, /* which mode we're in */
229 default_explicit_mode : 1, /* which mode is the default */
230 mode_explicitly_set : 1, /* was the current mode explicitly set? */
232 keep_pending_output : 1;
234 /* Each bundle consists of up to three instructions. We keep
235 track of the four most recent instructions so we can correctly set
236 the end_of_insn_group for the last instruction in a bundle. */
238 int num_slots_in_use;
242 end_of_insn_group : 1,
243 manual_bundling_on : 1,
244 manual_bundling_off : 1,
245 loc_directive_seen : 1;
246 signed char user_template; /* user-selected template, if any */
247 unsigned char qp_regno; /* qualifying predicate */
248 /* This duplicates a good fraction of "struct fix" but we
249 can't use a "struct fix" instead since we can't call
250 fix_new_exp() until we know the address of the instruction. */
254 bfd_reloc_code_real_type code;
255 enum ia64_opnd opnd; /* type of operand in need of fix */
256 unsigned int is_pcrel : 1; /* is operand pc-relative? */
257 expressionS expr; /* the value to be inserted */
259 fixup[2]; /* at most two fixups per insn */
260 struct ia64_opcode *idesc;
261 struct label_fix *label_fixups;
262 struct label_fix *tag_fixups;
263 struct unw_rec_list *unwind_record; /* Unwind directive. */
266 unsigned int src_line;
267 struct dwarf2_line_info debug_line;
275 struct dynreg *next; /* next dynamic register */
277 unsigned short base; /* the base register number */
278 unsigned short num_regs; /* # of registers in this set */
280 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
282 flagword flags; /* ELF-header flags */
285 unsigned hint:1; /* is this hint currently valid? */
286 bfd_vma offset; /* mem.offset offset */
287 bfd_vma base; /* mem.offset base */
290 int path; /* number of alt. entry points seen */
291 const char **entry_labels; /* labels of all alternate paths in
292 the current DV-checking block. */
293 int maxpaths; /* size currently allocated for
295 /* Support for hardware errata workarounds. */
297 /* Record data about the last three insn groups. */
300 /* B-step workaround.
301 For each predicate register, this is set if the corresponding insn
302 group conditionally sets this register with one of the affected
305 /* B-step workaround.
306 For each general register, this is set if the corresponding insn
307 a) is conditional on one of the predicate registers for which
308 P_REG_SET is 1 in the corresponding entry of the previous group,
309 b) sets this general register with one of the affected
311 int g_reg_set_conditionally[128];
315 int pointer_size; /* size in bytes of a pointer */
316 int pointer_size_shift; /* shift size of a pointer for alignment */
320 /* application registers: */
326 #define AR_BSPSTORE 18
341 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
342 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
343 {"ar.rsc", 16}, {"ar.bsp", 17},
344 {"ar.bspstore", 18}, {"ar.rnat", 19},
345 {"ar.fcr", 21}, {"ar.eflag", 24},
346 {"ar.csd", 25}, {"ar.ssd", 26},
347 {"ar.cflg", 27}, {"ar.fsr", 28},
348 {"ar.fir", 29}, {"ar.fdr", 30},
349 {"ar.ccv", 32}, {"ar.unat", 36},
350 {"ar.fpsr", 40}, {"ar.itc", 44},
351 {"ar.pfs", 64}, {"ar.lc", 65},
372 /* control registers: */
414 static const struct const_desc
421 /* PSR constant masks: */
424 {"psr.be", ((valueT) 1) << 1},
425 {"psr.up", ((valueT) 1) << 2},
426 {"psr.ac", ((valueT) 1) << 3},
427 {"psr.mfl", ((valueT) 1) << 4},
428 {"psr.mfh", ((valueT) 1) << 5},
430 {"psr.ic", ((valueT) 1) << 13},
431 {"psr.i", ((valueT) 1) << 14},
432 {"psr.pk", ((valueT) 1) << 15},
434 {"psr.dt", ((valueT) 1) << 17},
435 {"psr.dfl", ((valueT) 1) << 18},
436 {"psr.dfh", ((valueT) 1) << 19},
437 {"psr.sp", ((valueT) 1) << 20},
438 {"psr.pp", ((valueT) 1) << 21},
439 {"psr.di", ((valueT) 1) << 22},
440 {"psr.si", ((valueT) 1) << 23},
441 {"psr.db", ((valueT) 1) << 24},
442 {"psr.lp", ((valueT) 1) << 25},
443 {"psr.tb", ((valueT) 1) << 26},
444 {"psr.rt", ((valueT) 1) << 27},
445 /* 28-31: reserved */
446 /* 32-33: cpl (current privilege level) */
447 {"psr.is", ((valueT) 1) << 34},
448 {"psr.mc", ((valueT) 1) << 35},
449 {"psr.it", ((valueT) 1) << 36},
450 {"psr.id", ((valueT) 1) << 37},
451 {"psr.da", ((valueT) 1) << 38},
452 {"psr.dd", ((valueT) 1) << 39},
453 {"psr.ss", ((valueT) 1) << 40},
454 /* 41-42: ri (restart instruction) */
455 {"psr.ed", ((valueT) 1) << 43},
456 {"psr.bn", ((valueT) 1) << 44},
459 /* indirect register-sets/memory: */
468 { "CPUID", IND_CPUID },
469 { "cpuid", IND_CPUID },
481 /* Pseudo functions used to indicate relocation types (these functions
482 start with an at sign (@)). */
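/* Usage sketch (not taken from this file): these names appear in operands
   such as

       addl r2 = @gprel(sym), gp
       data8 @fptr(func)

   and the operand parser maps each @name onto the corresponding pseudo_func
   entry below.  */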
504 /* reloc pseudo functions (these must come first!): */
505 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
506 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
507 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
508 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
509 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
510 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
511 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
512 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
513 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
514 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
515 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
516 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
517 { "", 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
518 { "", 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
519 { "", 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
520 { "", 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
521 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
523 /* mbtype4 constants: */
524 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
525 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
526 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
527 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
528 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
530 /* fclass constants: */
531 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
532 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
533 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
534 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
535 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
536 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
537 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
538 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
539 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
541 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
543 /* hint constants: */
544 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
546 /* unwind-related constants: */
547 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
548 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
549 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
550 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_LINUX } },
551 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
552 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
553 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
555 /* unwind-related registers: */
556 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
559 /* 41-bit nop opcodes (one per unit): */
560 static const bfd_vma nop[IA64_NUM_UNITS] =
562 0x0000000000LL, /* NIL => break 0 */
563 0x0008000000LL, /* I-unit nop */
564 0x0008000000LL, /* M-unit nop */
565 0x4000000000LL, /* B-unit nop */
566 0x0008000000LL, /* F-unit nop */
567 0x0008000000LL, /* L-"unit" nop */
568 0x0008000000LL, /* X-unit nop */
571 /* Can't be `const' as it's passed to input routines (which have the
572 habit of setting temporary sentinels). */
573 static char special_section_name[][20] =
575 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
576 {".IA_64.unwind"}, {".IA_64.unwind_info"},
577 {".init_array"}, {".fini_array"}
580 /* The best template for a particular sequence of up to three instructions: */
582 #define N IA64_NUM_TYPES
583 static unsigned char best_template[N][N][N];
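/* Illustrative example: best_template is indexed by the ia64_insn_type of up
   to three consecutive instructions (names from opcode/ia64.h), so e.g.
   best_template[IA64_TYPE_M][IA64_TYPE_I][IA64_TYPE_I] would be expected to
   select an MII template.  The actual mapping is computed elsewhere in this
   file.  */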
586 /* Resource dependencies currently in effect */
588 int depind; /* dependency index */
589 const struct ia64_dependency *dependency; /* actual dependency */
590 unsigned specific:1, /* is this a specific bit/regno? */
591 link_to_qp_branch:1; /* will a branch on the same QP clear it? */
592 int index; /* specific regno/bit within dependency */
593 int note; /* optional qualifying note (0 if none) */
597 int insn_srlz; /* current insn serialization state */
598 int data_srlz; /* current data serialization state */
599 int qp_regno; /* qualifying predicate for this usage */
600 char *file; /* what file marked this dependency */
601 unsigned int line; /* what line marked this dependency */
602 struct mem_offset mem_offset; /* optional memory offset hint */
603 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
604 int path; /* corresponding code entry index */
606 static int regdepslen = 0;
607 static int regdepstotlen = 0;
608 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
609 static const char *dv_sem[] = { "none", "implied", "impliedf",
610 "data", "instr", "specific", "stop", "other" };
611 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
613 /* Current state of PR mutexation */
614 static struct qpmutex {
617 } *qp_mutexes = NULL; /* QP mutex bitmasks */
618 static int qp_mutexeslen = 0;
619 static int qp_mutexestotlen = 0;
620 static valueT qp_safe_across_calls = 0;
622 /* Current state of PR implications */
623 static struct qp_imply {
626 unsigned p2_branched:1;
628 } *qp_implies = NULL;
629 static int qp_implieslen = 0;
630 static int qp_impliestotlen = 0;
632 /* Keep track of static GR values so that indirect register usage can
633 sometimes be tracked. */
638 } gr_values[128] = {{ 1, 0, 0 }};
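/* Illustrative note: the initializer above marks gr_values[0] as known with
   value 0, matching the architectural guarantee that r0 always reads as
   zero; other entries only become known when an instruction with a
   trackable constant result (e.g. a mov of an immediate) is seen.  */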
640 /* Remember the alignment frag. */
641 static fragS *align_frag;
643 /* These are the routines required to output the various types of
646 /* A slot_number is a frag address plus the slot index (0-2). We use the
647 frag address here so that if there is a section switch in the middle of
648 a function, then instructions emitted to a different section are not
649 counted. Since there may be more than one frag for a function, this
650 means we also need to keep track of which frag this address belongs to
651 so we can compute inter-frag distances. This also nicely solves the
652 problem with nops emitted for align directives, which can't easily be
653 counted, but can easily be derived from frag sizes. */
655 typedef struct unw_rec_list {
657 unsigned long slot_number;
659 unsigned long next_slot_number;
660 fragS *next_slot_frag;
661 struct unw_rec_list *next;
664 #define SLOT_NUM_NOT_SET (unsigned)-1
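/* Illustrative sketch (hypothetical variable names): when a directive is
   attached to the current instruction, its slot is recorded roughly as

       ptr->slot_number = (unsigned long) frag_more (0) + slot_in_bundle;
       ptr->slot_frag = frag_now;

   with slot_in_bundle being 0, 1 or 2; slot_index () later converts two such
   (address, frag) pairs back into a count of instruction slots.  */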
666 /* Linked list of saved prologue counts. A very poor
667 implementation of a map from label numbers to prologue counts. */
668 typedef struct label_prologue_count
670 struct label_prologue_count *next;
671 unsigned long label_number;
672 unsigned int prologue_count;
673 } label_prologue_count;
677 /* Maintain a list of unwind entries for the current function. */
681 /* Any unwind entries that should be attached to the current slot
682 that an insn is being constructed for. */
683 unw_rec_list *current_entry;
685 /* These are used to create the unwind table entry for this function. */
688 symbolS *info; /* pointer to unwind info */
689 symbolS *personality_routine;
691 subsegT saved_text_subseg;
692 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
694 /* TRUE if processing unwind directives in a prologue region. */
697 unsigned int prologue_count; /* number of .prologues seen so far */
698 /* Prologue counts at previous .label_state directives. */
699 struct label_prologue_count * saved_prologue_counts;
702 /* The input value is a negated offset from psp, and specifies the address
703 psp - offset. An encoded value E denotes the address psp + 16 - (4 * E),
704 so we must add 16 and divide by 4 to get the encoded value. */
706 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
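/* Worked example (illustrative only): a save at psp - 32, i.e. OFFSET == 32,
   encodes as ENCODED_PSP_OFFSET (32) == (32 + 16) / 4 == 12, and decoding
   12 gives psp + 16 - 4 * 12 == psp - 32 again.  */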
708 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
710 /* Forward declarations: */
711 static void set_section PARAMS ((char *name));
712 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
713 unsigned int, unsigned int));
714 static void dot_align (int);
715 static void dot_radix PARAMS ((int));
716 static void dot_special_section PARAMS ((int));
717 static void dot_proc PARAMS ((int));
718 static void dot_fframe PARAMS ((int));
719 static void dot_vframe PARAMS ((int));
720 static void dot_vframesp PARAMS ((int));
721 static void dot_vframepsp PARAMS ((int));
722 static void dot_save PARAMS ((int));
723 static void dot_restore PARAMS ((int));
724 static void dot_restorereg PARAMS ((int));
725 static void dot_restorereg_p PARAMS ((int));
726 static void dot_handlerdata PARAMS ((int));
727 static void dot_unwentry PARAMS ((int));
728 static void dot_altrp PARAMS ((int));
729 static void dot_savemem PARAMS ((int));
730 static void dot_saveg PARAMS ((int));
731 static void dot_savef PARAMS ((int));
732 static void dot_saveb PARAMS ((int));
733 static void dot_savegf PARAMS ((int));
734 static void dot_spill PARAMS ((int));
735 static void dot_spillreg PARAMS ((int));
736 static void dot_spillmem PARAMS ((int));
737 static void dot_spillreg_p PARAMS ((int));
738 static void dot_spillmem_p PARAMS ((int));
739 static void dot_label_state PARAMS ((int));
740 static void dot_copy_state PARAMS ((int));
741 static void dot_unwabi PARAMS ((int));
742 static void dot_personality PARAMS ((int));
743 static void dot_body PARAMS ((int));
744 static void dot_prologue PARAMS ((int));
745 static void dot_endp PARAMS ((int));
746 static void dot_template PARAMS ((int));
747 static void dot_regstk PARAMS ((int));
748 static void dot_rot PARAMS ((int));
749 static void dot_byteorder PARAMS ((int));
750 static void dot_psr PARAMS ((int));
751 static void dot_alias PARAMS ((int));
752 static void dot_ln PARAMS ((int));
753 static char *parse_section_name PARAMS ((void));
754 static void dot_xdata PARAMS ((int));
755 static void stmt_float_cons PARAMS ((int));
756 static void stmt_cons_ua PARAMS ((int));
757 static void dot_xfloat_cons PARAMS ((int));
758 static void dot_xstringer PARAMS ((int));
759 static void dot_xdata_ua PARAMS ((int));
760 static void dot_xfloat_cons_ua PARAMS ((int));
761 static void print_prmask PARAMS ((valueT mask));
762 static void dot_pred_rel PARAMS ((int));
763 static void dot_reg_val PARAMS ((int));
764 static void dot_serialize PARAMS ((int));
765 static void dot_dv_mode PARAMS ((int));
766 static void dot_entry PARAMS ((int));
767 static void dot_mem_offset PARAMS ((int));
768 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
769 static symbolS *declare_register PARAMS ((const char *name, int regnum));
770 static void declare_register_set PARAMS ((const char *, int, int));
771 static unsigned int operand_width PARAMS ((enum ia64_opnd));
772 static enum operand_match_result operand_match PARAMS ((const struct ia64_opcode *idesc,
775 static int parse_operand PARAMS ((expressionS *e));
776 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
777 static int errata_nop_necessary_p PARAMS ((struct slot *, enum ia64_unit));
778 static void build_insn PARAMS ((struct slot *, bfd_vma *));
779 static void emit_one_bundle PARAMS ((void));
780 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
781 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
782 bfd_reloc_code_real_type r_type));
783 static void insn_group_break PARAMS ((int, int, int));
784 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
785 struct rsrc *, int depind, int path));
786 static void add_qp_mutex PARAMS((valueT mask));
787 static void add_qp_imply PARAMS((int p1, int p2));
788 static void clear_qp_branch_flag PARAMS((valueT mask));
789 static void clear_qp_mutex PARAMS((valueT mask));
790 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
791 static int has_suffix_p PARAMS((const char *, const char *));
792 static void clear_register_values PARAMS ((void));
793 static void print_dependency PARAMS ((const char *action, int depind));
794 static void instruction_serialization PARAMS ((void));
795 static void data_serialization PARAMS ((void));
796 static void remove_marked_resource PARAMS ((struct rsrc *));
797 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
798 static int is_taken_branch PARAMS ((struct ia64_opcode *));
799 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
800 static int depends_on PARAMS ((int, struct ia64_opcode *));
801 static int specify_resource PARAMS ((const struct ia64_dependency *,
802 struct ia64_opcode *, int, struct rsrc [], int, int));
803 static int check_dv PARAMS((struct ia64_opcode *idesc));
804 static void check_dependencies PARAMS((struct ia64_opcode *));
805 static void mark_resources PARAMS((struct ia64_opcode *));
806 static void update_dependencies PARAMS((struct ia64_opcode *));
807 static void note_register_values PARAMS((struct ia64_opcode *));
808 static int qp_mutex PARAMS ((int, int, int));
809 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
810 static void output_vbyte_mem PARAMS ((int, char *, char *));
811 static void count_output PARAMS ((int, char *, char *));
812 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
813 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
814 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
815 static void output_P1_format PARAMS ((vbyte_func, int));
816 static void output_P2_format PARAMS ((vbyte_func, int, int));
817 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
818 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
819 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
820 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
821 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
822 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
823 static void output_P9_format PARAMS ((vbyte_func, int, int));
824 static void output_P10_format PARAMS ((vbyte_func, int, int));
825 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
826 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
827 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
828 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
829 static char format_ab_reg PARAMS ((int, int));
830 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
832 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
833 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
835 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
836 static unw_rec_list *output_endp PARAMS ((void));
837 static unw_rec_list *output_prologue PARAMS ((void));
838 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
839 static unw_rec_list *output_body PARAMS ((void));
840 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
841 static unw_rec_list *output_mem_stack_v PARAMS ((void));
842 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
843 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
844 static unw_rec_list *output_rp_when PARAMS ((void));
845 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
846 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
847 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
848 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
849 static unw_rec_list *output_pfs_when PARAMS ((void));
850 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
851 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
852 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
853 static unw_rec_list *output_preds_when PARAMS ((void));
854 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
855 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
856 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
857 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
858 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
859 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
860 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
861 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
862 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
863 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
864 static unw_rec_list *output_unat_when PARAMS ((void));
865 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
866 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
867 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
868 static unw_rec_list *output_lc_when PARAMS ((void));
869 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
870 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
871 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
872 static unw_rec_list *output_fpsr_when PARAMS ((void));
873 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
874 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
875 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
876 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
877 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
878 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
879 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
880 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
881 static unw_rec_list *output_bsp_when PARAMS ((void));
882 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
883 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
884 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
885 static unw_rec_list *output_bspstore_when PARAMS ((void));
886 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
887 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
888 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
889 static unw_rec_list *output_rnat_when PARAMS ((void));
890 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
891 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
892 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
893 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
894 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
895 static unw_rec_list *output_label_state PARAMS ((unsigned long));
896 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
897 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
898 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
899 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
901 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
903 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
905 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
906 unsigned int, unsigned int));
907 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
908 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
909 static int calc_record_size PARAMS ((unw_rec_list *));
910 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
911 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
912 unsigned long, fragS *,
914 static unw_rec_list *optimize_unw_records PARAMS ((unw_rec_list *));
915 static void fixup_unw_records PARAMS ((unw_rec_list *, int));
916 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
917 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
918 static unsigned int get_saved_prologue_count PARAMS ((unsigned long));
919 static void save_prologue_count PARAMS ((unsigned long, unsigned int));
920 static void free_saved_prologue_counts PARAMS ((void));
922 /* Determine if application register REGNUM resides only in the integer
923 unit (as opposed to the memory unit). */
925 ar_is_only_in_integer_unit (int reg)
928 return reg >= 64 && reg <= 111;
931 /* Determine if application register REGNUM resides only in the memory
932 unit (as opposed to the integer unit). */
934 ar_is_only_in_memory_unit (int reg)
937 return reg >= 0 && reg <= 47;
940 /* Switch to section NAME and create section if necessary. It's
941 rather ugly that we have to manipulate input_line_pointer but I
942 don't see any other way to accomplish the same thing without
943 changing obj-elf.c (which may be the Right Thing, in the end). */
948 char *saved_input_line_pointer;
950 saved_input_line_pointer = input_line_pointer;
951 input_line_pointer = name;
953 input_line_pointer = saved_input_line_pointer;
956 /* Map 's' to SHF_IA_64_SHORT. */
959 ia64_elf_section_letter (letter, ptr_msg)
964 return SHF_IA_64_SHORT;
965 else if (letter == 'o')
966 return SHF_LINK_ORDER;
968 *ptr_msg = _("Bad .section directive: want a,o,s,w,x,M,S,G,T in string");
972 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
975 ia64_elf_section_flags (flags, attr, type)
977 int attr, type ATTRIBUTE_UNUSED;
979 if (attr & SHF_IA_64_SHORT)
980 flags |= SEC_SMALL_DATA;
985 ia64_elf_section_type (str, len)
989 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
991 if (STREQ (ELF_STRING_ia64_unwind_info))
994 if (STREQ (ELF_STRING_ia64_unwind_info_once))
997 if (STREQ (ELF_STRING_ia64_unwind))
998 return SHT_IA_64_UNWIND;
1000 if (STREQ (ELF_STRING_ia64_unwind_once))
1001 return SHT_IA_64_UNWIND;
1003 if (STREQ ("unwind"))
1004 return SHT_IA_64_UNWIND;
1011 set_regstack (ins, locs, outs, rots)
1012 unsigned int ins, locs, outs, rots;
1014 /* Size of frame. */
1017 sof = ins + locs + outs;
1020 as_bad ("Size of frame exceeds maximum of 96 registers");
1025 as_warn ("Size of rotating registers exceeds frame size");
1028 md.in.base = REG_GR + 32;
1029 md.loc.base = md.in.base + ins;
1030 md.out.base = md.loc.base + locs;
1032 md.in.num_regs = ins;
1033 md.loc.num_regs = locs;
1034 md.out.num_regs = outs;
1035 md.rot.num_regs = rots;
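/* Worked example (illustrative only): ".regstk 2, 3, 4, 0" gives
   sof = 2 + 3 + 4 = 9, with md.in.base at r32, md.loc.base at r34 and
   md.out.base at r37; that is, in0-in1 map to r32-r33, loc0-loc2 to
   r34-r36 and out0-out3 to r37-r40.  */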
1042 struct label_fix *lfix;
1044 subsegT saved_subseg;
1047 if (!md.last_text_seg)
1050 saved_seg = now_seg;
1051 saved_subseg = now_subseg;
1053 subseg_set (md.last_text_seg, 0);
1055 while (md.num_slots_in_use > 0)
1056 emit_one_bundle (); /* force out queued instructions */
1058 /* In case there are labels following the last instruction, resolve
1060 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
1062 S_SET_VALUE (lfix->sym, frag_now_fix ());
1063 symbol_set_frag (lfix->sym, frag_now);
1065 CURR_SLOT.label_fixups = 0;
1066 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
1068 S_SET_VALUE (lfix->sym, frag_now_fix ());
1069 symbol_set_frag (lfix->sym, frag_now);
1071 CURR_SLOT.tag_fixups = 0;
1073 /* In case there are unwind directives following the last instruction,
1074 resolve those now. We only handle prologue, body, and endp directives
1075 here. Give an error for others. */
1076 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
1078 switch (ptr->r.type)
1084 ptr->slot_number = (unsigned long) frag_more (0);
1085 ptr->slot_frag = frag_now;
1088 /* Allow any record which doesn't have a "t" field (i.e.,
1089 doesn't relate to a particular instruction). */
1105 as_bad (_("Unwind directive not followed by an instruction."));
1109 unwind.current_entry = NULL;
1111 subseg_set (saved_seg, saved_subseg);
1113 if (md.qp.X_op == O_register)
1114 as_bad ("qualifying predicate not followed by instruction");
1118 ia64_do_align (int nbytes)
1120 char *saved_input_line_pointer = input_line_pointer;
1122 input_line_pointer = "";
1123 s_align_bytes (nbytes);
1124 input_line_pointer = saved_input_line_pointer;
1128 ia64_cons_align (nbytes)
1133 char *saved_input_line_pointer = input_line_pointer;
1134 input_line_pointer = "";
1135 s_align_bytes (nbytes);
1136 input_line_pointer = saved_input_line_pointer;
1140 /* Output COUNT bytes to a memory location. */
1141 static unsigned char *vbyte_mem_ptr = NULL;
1144 output_vbyte_mem (count, ptr, comment)
1147 char *comment ATTRIBUTE_UNUSED;
1150 if (vbyte_mem_ptr == NULL)
1155 for (x = 0; x < count; x++)
1156 *(vbyte_mem_ptr++) = ptr[x];
1159 /* Count the number of bytes required for records. */
1160 static int vbyte_count = 0;
1162 count_output (count, ptr, comment)
1164 char *ptr ATTRIBUTE_UNUSED;
1165 char *comment ATTRIBUTE_UNUSED;
1167 vbyte_count += count;
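/* Illustrative note: the unwind records are processed twice with the same
   format routines -- calc_record_size () runs them with count_output to
   learn the total size, and the actual emission runs them again with
   output_vbyte_mem after pointing vbyte_mem_ptr at the destination.  */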
1171 output_R1_format (f, rtype, rlen)
1173 unw_record_type rtype;
1180 output_R3_format (f, rtype, rlen);
1186 else if (rtype != prologue)
1187 as_bad ("record type is not valid");
1189 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1190 (*f) (1, &byte, NULL);
1194 output_R2_format (f, mask, grsave, rlen)
1201 mask = (mask & 0x0f);
1202 grsave = (grsave & 0x7f);
1204 bytes[0] = (UNW_R2 | (mask >> 1));
1205 bytes[1] = (((mask & 0x01) << 7) | grsave);
1206 count += output_leb128 (bytes + 2, rlen, 0);
1207 (*f) (count, bytes, NULL);
1211 output_R3_format (f, rtype, rlen)
1213 unw_record_type rtype;
1220 output_R1_format (f, rtype, rlen);
1226 else if (rtype != prologue)
1227 as_bad ("record type is not valid");
1228 bytes[0] = (UNW_R3 | r);
1229 count = output_leb128 (bytes + 1, rlen, 0);
1230 (*f) (count + 1, bytes, NULL);
1234 output_P1_format (f, brmask)
1239 byte = UNW_P1 | (brmask & 0x1f);
1240 (*f) (1, &byte, NULL);
1244 output_P2_format (f, brmask, gr)
1250 brmask = (brmask & 0x1f);
1251 bytes[0] = UNW_P2 | (brmask >> 1);
1252 bytes[1] = (((brmask & 1) << 7) | gr);
1253 (*f) (2, bytes, NULL);
1257 output_P3_format (f, rtype, reg)
1259 unw_record_type rtype;
1304 as_bad ("Invalid record type for P3 format.");
1306 bytes[0] = (UNW_P3 | (r >> 1));
1307 bytes[1] = (((r & 1) << 7) | reg);
1308 (*f) (2, bytes, NULL);
1312 output_P4_format (f, imask, imask_size)
1314 unsigned char *imask;
1315 unsigned long imask_size;
1318 (*f) (imask_size, imask, NULL);
1322 output_P5_format (f, grmask, frmask)
1325 unsigned long frmask;
1328 grmask = (grmask & 0x0f);
1331 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1332 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1333 bytes[3] = (frmask & 0x000000ff);
1334 (*f) (4, bytes, NULL);
1338 output_P6_format (f, rtype, rmask)
1340 unw_record_type rtype;
1346 if (rtype == gr_mem)
1348 else if (rtype != fr_mem)
1349 as_bad ("Invalid record type for format P6");
1350 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1351 (*f) (1, &byte, NULL);
1355 output_P7_format (f, rtype, w1, w2)
1357 unw_record_type rtype;
1364 count += output_leb128 (bytes + 1, w1, 0);
1369 count += output_leb128 (bytes + count, w2 >> 4, 0);
1419 bytes[0] = (UNW_P7 | r);
1420 (*f) (count, bytes, NULL);
1424 output_P8_format (f, rtype, t)
1426 unw_record_type rtype;
1465 case bspstore_psprel:
1468 case bspstore_sprel:
1480 case priunat_when_gr:
1483 case priunat_psprel:
1489 case priunat_when_mem:
1496 count += output_leb128 (bytes + 2, t, 0);
1497 (*f) (count, bytes, NULL);
1501 output_P9_format (f, grmask, gr)
1508 bytes[1] = (grmask & 0x0f);
1509 bytes[2] = (gr & 0x7f);
1510 (*f) (3, bytes, NULL);
1514 output_P10_format (f, abi, context)
1521 bytes[1] = (abi & 0xff);
1522 bytes[2] = (context & 0xff);
1523 (*f) (3, bytes, NULL);
1527 output_B1_format (f, rtype, label)
1529 unw_record_type rtype;
1530 unsigned long label;
1536 output_B4_format (f, rtype, label);
1539 if (rtype == copy_state)
1541 else if (rtype != label_state)
1542 as_bad ("Invalid record type for format B1");
1544 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1545 (*f) (1, &byte, NULL);
1549 output_B2_format (f, ecount, t)
1551 unsigned long ecount;
1558 output_B3_format (f, ecount, t);
1561 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1562 count += output_leb128 (bytes + 1, t, 0);
1563 (*f) (count, bytes, NULL);
1567 output_B3_format (f, ecount, t)
1569 unsigned long ecount;
1576 output_B2_format (f, ecount, t);
1580 count += output_leb128 (bytes + 1, t, 0);
1581 count += output_leb128 (bytes + count, ecount, 0);
1582 (*f) (count, bytes, NULL);
1586 output_B4_format (f, rtype, label)
1588 unw_record_type rtype;
1589 unsigned long label;
1596 output_B1_format (f, rtype, label);
1600 if (rtype == copy_state)
1602 else if (rtype != label_state)
1603 as_bad ("Invalid record type for format B4");
1605 bytes[0] = (UNW_B4 | (r << 3));
1606 count += output_leb128 (bytes + 1, label, 0);
1607 (*f) (count, bytes, NULL);
1611 format_ab_reg (ab, reg)
1618 ret = (ab << 5) | reg;
1623 output_X1_format (f, rtype, ab, reg, t, w1)
1625 unw_record_type rtype;
1635 if (rtype == spill_sprel)
1637 else if (rtype != spill_psprel)
1638 as_bad ("Invalid record type for format X1");
1639 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1640 count += output_leb128 (bytes + 2, t, 0);
1641 count += output_leb128 (bytes + count, w1, 0);
1642 (*f) (count, bytes, NULL);
1646 output_X2_format (f, ab, reg, x, y, treg, t)
1655 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1656 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1657 count += output_leb128 (bytes + 3, t, 0);
1658 (*f) (count, bytes, NULL);
1662 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1664 unw_record_type rtype;
1675 if (rtype == spill_sprel_p)
1677 else if (rtype != spill_psprel_p)
1678 as_bad ("Invalid record type for format X3");
1679 bytes[1] = ((r << 7) | (qp & 0x3f));
1680 bytes[2] = format_ab_reg (ab, reg);
1681 count += output_leb128 (bytes + 3, t, 0);
1682 count += output_leb128 (bytes + count, w1, 0);
1683 (*f) (count, bytes, NULL);
1687 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1697 bytes[1] = (qp & 0x3f);
1698 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1699 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1700 count += output_leb128 (bytes + 4, t, 0);
1701 (*f) (count, bytes, NULL);
1704 /* This function allocates a record list structure, and initializes fields. */
1706 static unw_rec_list *
1707 alloc_record (unw_record_type t)
1710 ptr = xmalloc (sizeof (*ptr));
1712 ptr->slot_number = SLOT_NUM_NOT_SET;
1714 ptr->next_slot_number = 0;
1715 ptr->next_slot_frag = 0;
1719 /* Dummy unwind record used for calculating the length of the last prologue or body region. */
1722 static unw_rec_list *
1725 unw_rec_list *ptr = alloc_record (endp);
1729 static unw_rec_list *
1732 unw_rec_list *ptr = alloc_record (prologue);
1733 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1737 static unw_rec_list *
1738 output_prologue_gr (saved_mask, reg)
1739 unsigned int saved_mask;
1742 unw_rec_list *ptr = alloc_record (prologue_gr);
1743 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1744 ptr->r.record.r.grmask = saved_mask;
1745 ptr->r.record.r.grsave = reg;
1749 static unw_rec_list *
1752 unw_rec_list *ptr = alloc_record (body);
1756 static unw_rec_list *
1757 output_mem_stack_f (size)
1760 unw_rec_list *ptr = alloc_record (mem_stack_f);
1761 ptr->r.record.p.size = size;
1765 static unw_rec_list *
1766 output_mem_stack_v ()
1768 unw_rec_list *ptr = alloc_record (mem_stack_v);
1772 static unw_rec_list *
1776 unw_rec_list *ptr = alloc_record (psp_gr);
1777 ptr->r.record.p.gr = gr;
1781 static unw_rec_list *
1782 output_psp_sprel (offset)
1783 unsigned int offset;
1785 unw_rec_list *ptr = alloc_record (psp_sprel);
1786 ptr->r.record.p.spoff = offset / 4;
1790 static unw_rec_list *
1793 unw_rec_list *ptr = alloc_record (rp_when);
1797 static unw_rec_list *
1801 unw_rec_list *ptr = alloc_record (rp_gr);
1802 ptr->r.record.p.gr = gr;
1806 static unw_rec_list *
1810 unw_rec_list *ptr = alloc_record (rp_br);
1811 ptr->r.record.p.br = br;
1815 static unw_rec_list *
1816 output_rp_psprel (offset)
1817 unsigned int offset;
1819 unw_rec_list *ptr = alloc_record (rp_psprel);
1820 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1824 static unw_rec_list *
1825 output_rp_sprel (offset)
1826 unsigned int offset;
1828 unw_rec_list *ptr = alloc_record (rp_sprel);
1829 ptr->r.record.p.spoff = offset / 4;
1833 static unw_rec_list *
1836 unw_rec_list *ptr = alloc_record (pfs_when);
1840 static unw_rec_list *
1844 unw_rec_list *ptr = alloc_record (pfs_gr);
1845 ptr->r.record.p.gr = gr;
1849 static unw_rec_list *
1850 output_pfs_psprel (offset)
1851 unsigned int offset;
1853 unw_rec_list *ptr = alloc_record (pfs_psprel);
1854 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1858 static unw_rec_list *
1859 output_pfs_sprel (offset)
1860 unsigned int offset;
1862 unw_rec_list *ptr = alloc_record (pfs_sprel);
1863 ptr->r.record.p.spoff = offset / 4;
1867 static unw_rec_list *
1868 output_preds_when ()
1870 unw_rec_list *ptr = alloc_record (preds_when);
1874 static unw_rec_list *
1875 output_preds_gr (gr)
1878 unw_rec_list *ptr = alloc_record (preds_gr);
1879 ptr->r.record.p.gr = gr;
1883 static unw_rec_list *
1884 output_preds_psprel (offset)
1885 unsigned int offset;
1887 unw_rec_list *ptr = alloc_record (preds_psprel);
1888 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1892 static unw_rec_list *
1893 output_preds_sprel (offset)
1894 unsigned int offset;
1896 unw_rec_list *ptr = alloc_record (preds_sprel);
1897 ptr->r.record.p.spoff = offset / 4;
1901 static unw_rec_list *
1902 output_fr_mem (mask)
1905 unw_rec_list *ptr = alloc_record (fr_mem);
1906 ptr->r.record.p.rmask = mask;
1910 static unw_rec_list *
1911 output_frgr_mem (gr_mask, fr_mask)
1912 unsigned int gr_mask;
1913 unsigned int fr_mask;
1915 unw_rec_list *ptr = alloc_record (frgr_mem);
1916 ptr->r.record.p.grmask = gr_mask;
1917 ptr->r.record.p.frmask = fr_mask;
1921 static unw_rec_list *
1922 output_gr_gr (mask, reg)
1926 unw_rec_list *ptr = alloc_record (gr_gr);
1927 ptr->r.record.p.grmask = mask;
1928 ptr->r.record.p.gr = reg;
1932 static unw_rec_list *
1933 output_gr_mem (mask)
1936 unw_rec_list *ptr = alloc_record (gr_mem);
1937 ptr->r.record.p.rmask = mask;
1941 static unw_rec_list *
1942 output_br_mem (unsigned int mask)
1944 unw_rec_list *ptr = alloc_record (br_mem);
1945 ptr->r.record.p.brmask = mask;
1949 static unw_rec_list *
1950 output_br_gr (save_mask, reg)
1951 unsigned int save_mask;
1954 unw_rec_list *ptr = alloc_record (br_gr);
1955 ptr->r.record.p.brmask = save_mask;
1956 ptr->r.record.p.gr = reg;
1960 static unw_rec_list *
1961 output_spill_base (offset)
1962 unsigned int offset;
1964 unw_rec_list *ptr = alloc_record (spill_base);
1965 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1969 static unw_rec_list *
1972 unw_rec_list *ptr = alloc_record (unat_when);
1976 static unw_rec_list *
1980 unw_rec_list *ptr = alloc_record (unat_gr);
1981 ptr->r.record.p.gr = gr;
1985 static unw_rec_list *
1986 output_unat_psprel (offset)
1987 unsigned int offset;
1989 unw_rec_list *ptr = alloc_record (unat_psprel);
1990 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1994 static unw_rec_list *
1995 output_unat_sprel (offset)
1996 unsigned int offset;
1998 unw_rec_list *ptr = alloc_record (unat_sprel);
1999 ptr->r.record.p.spoff = offset / 4;
2003 static unw_rec_list *
2006 unw_rec_list *ptr = alloc_record (lc_when);
2010 static unw_rec_list *
2014 unw_rec_list *ptr = alloc_record (lc_gr);
2015 ptr->r.record.p.gr = gr;
2019 static unw_rec_list *
2020 output_lc_psprel (offset)
2021 unsigned int offset;
2023 unw_rec_list *ptr = alloc_record (lc_psprel);
2024 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2028 static unw_rec_list *
2029 output_lc_sprel (offset)
2030 unsigned int offset;
2032 unw_rec_list *ptr = alloc_record (lc_sprel);
2033 ptr->r.record.p.spoff = offset / 4;
2037 static unw_rec_list *
2040 unw_rec_list *ptr = alloc_record (fpsr_when);
2044 static unw_rec_list *
2048 unw_rec_list *ptr = alloc_record (fpsr_gr);
2049 ptr->r.record.p.gr = gr;
2053 static unw_rec_list *
2054 output_fpsr_psprel (offset)
2055 unsigned int offset;
2057 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2058 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2062 static unw_rec_list *
2063 output_fpsr_sprel (offset)
2064 unsigned int offset;
2066 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2067 ptr->r.record.p.spoff = offset / 4;
2071 static unw_rec_list *
2072 output_priunat_when_gr ()
2074 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2078 static unw_rec_list *
2079 output_priunat_when_mem ()
2081 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2085 static unw_rec_list *
2086 output_priunat_gr (gr)
2089 unw_rec_list *ptr = alloc_record (priunat_gr);
2090 ptr->r.record.p.gr = gr;
2094 static unw_rec_list *
2095 output_priunat_psprel (offset)
2096 unsigned int offset;
2098 unw_rec_list *ptr = alloc_record (priunat_psprel);
2099 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2103 static unw_rec_list *
2104 output_priunat_sprel (offset)
2105 unsigned int offset;
2107 unw_rec_list *ptr = alloc_record (priunat_sprel);
2108 ptr->r.record.p.spoff = offset / 4;
2112 static unw_rec_list *
2115 unw_rec_list *ptr = alloc_record (bsp_when);
2119 static unw_rec_list *
2123 unw_rec_list *ptr = alloc_record (bsp_gr);
2124 ptr->r.record.p.gr = gr;
2128 static unw_rec_list *
2129 output_bsp_psprel (offset)
2130 unsigned int offset;
2132 unw_rec_list *ptr = alloc_record (bsp_psprel);
2133 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2137 static unw_rec_list *
2138 output_bsp_sprel (offset)
2139 unsigned int offset;
2141 unw_rec_list *ptr = alloc_record (bsp_sprel);
2142 ptr->r.record.p.spoff = offset / 4;
2146 static unw_rec_list *
2147 output_bspstore_when ()
2149 unw_rec_list *ptr = alloc_record (bspstore_when);
2153 static unw_rec_list *
2154 output_bspstore_gr (gr)
2157 unw_rec_list *ptr = alloc_record (bspstore_gr);
2158 ptr->r.record.p.gr = gr;
2162 static unw_rec_list *
2163 output_bspstore_psprel (offset)
2164 unsigned int offset;
2166 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2167 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2171 static unw_rec_list *
2172 output_bspstore_sprel (offset)
2173 unsigned int offset;
2175 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2176 ptr->r.record.p.spoff = offset / 4;
2180 static unw_rec_list *
2183 unw_rec_list *ptr = alloc_record (rnat_when);
2187 static unw_rec_list *
2191 unw_rec_list *ptr = alloc_record (rnat_gr);
2192 ptr->r.record.p.gr = gr;
2196 static unw_rec_list *
2197 output_rnat_psprel (offset)
2198 unsigned int offset;
2200 unw_rec_list *ptr = alloc_record (rnat_psprel);
2201 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2205 static unw_rec_list *
2206 output_rnat_sprel (offset)
2207 unsigned int offset;
2209 unw_rec_list *ptr = alloc_record (rnat_sprel);
2210 ptr->r.record.p.spoff = offset / 4;
2214 static unw_rec_list *
2215 output_unwabi (abi, context)
2217 unsigned long context;
2219 unw_rec_list *ptr = alloc_record (unwabi);
2220 ptr->r.record.p.abi = abi;
2221 ptr->r.record.p.context = context;
2225 static unw_rec_list *
2226 output_epilogue (unsigned long ecount)
2228 unw_rec_list *ptr = alloc_record (epilogue);
2229 ptr->r.record.b.ecount = ecount;
2233 static unw_rec_list *
2234 output_label_state (unsigned long label)
2236 unw_rec_list *ptr = alloc_record (label_state);
2237 ptr->r.record.b.label = label;
2241 static unw_rec_list *
2242 output_copy_state (unsigned long label)
2244 unw_rec_list *ptr = alloc_record (copy_state);
2245 ptr->r.record.b.label = label;
2249 static unw_rec_list *
2250 output_spill_psprel (ab, reg, offset)
2253 unsigned int offset;
2255 unw_rec_list *ptr = alloc_record (spill_psprel);
2256 ptr->r.record.x.ab = ab;
2257 ptr->r.record.x.reg = reg;
2258 ptr->r.record.x.pspoff = ENCODED_PSP_OFFSET (offset);
2262 static unw_rec_list *
2263 output_spill_sprel (ab, reg, offset)
2266 unsigned int offset;
2268 unw_rec_list *ptr = alloc_record (spill_sprel);
2269 ptr->r.record.x.ab = ab;
2270 ptr->r.record.x.reg = reg;
2271 ptr->r.record.x.spoff = offset / 4;
2275 static unw_rec_list *
2276 output_spill_psprel_p (ab, reg, offset, predicate)
2279 unsigned int offset;
2280 unsigned int predicate;
2282 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2283 ptr->r.record.x.ab = ab;
2284 ptr->r.record.x.reg = reg;
2285 ptr->r.record.x.pspoff = ENCODED_PSP_OFFSET (offset);
2286 ptr->r.record.x.qp = predicate;
2290 static unw_rec_list *
2291 output_spill_sprel_p (ab, reg, offset, predicate)
2294 unsigned int offset;
2295 unsigned int predicate;
2297 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2298 ptr->r.record.x.ab = ab;
2299 ptr->r.record.x.reg = reg;
2300 ptr->r.record.x.spoff = offset / 4;
2301 ptr->r.record.x.qp = predicate;
2305 static unw_rec_list *
2306 output_spill_reg (ab, reg, targ_reg, xy)
2309 unsigned int targ_reg;
2312 unw_rec_list *ptr = alloc_record (spill_reg);
2313 ptr->r.record.x.ab = ab;
2314 ptr->r.record.x.reg = reg;
2315 ptr->r.record.x.treg = targ_reg;
2316 ptr->r.record.x.xy = xy;
2320 static unw_rec_list *
2321 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2324 unsigned int targ_reg;
2326 unsigned int predicate;
2328 unw_rec_list *ptr = alloc_record (spill_reg_p);
2329 ptr->r.record.x.ab = ab;
2330 ptr->r.record.x.reg = reg;
2331 ptr->r.record.x.treg = targ_reg;
2332 ptr->r.record.x.xy = xy;
2333 ptr->r.record.x.qp = predicate;
2337 /* Given a unw_rec_list, process the record in the correct format with the
2338 specified function. */
2341 process_one_record (ptr, f)
2345 unsigned long fr_mask, gr_mask;
2347 switch (ptr->r.type)
2349 /* This is a dummy record that takes up no space in the output. */
2357 /* These are taken care of by prologue/prologue_gr. */
2362 if (ptr->r.type == prologue_gr)
2363 output_R2_format (f, ptr->r.record.r.grmask,
2364 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2366 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2368 /* Output descriptor(s) for union of register spills (if any). */
2369 gr_mask = ptr->r.record.r.mask.gr_mem;
2370 fr_mask = ptr->r.record.r.mask.fr_mem;
2373 if ((fr_mask & ~0xfUL) == 0)
2374 output_P6_format (f, fr_mem, fr_mask);
2377 output_P5_format (f, gr_mask, fr_mask);
2382 output_P6_format (f, gr_mem, gr_mask);
2383 if (ptr->r.record.r.mask.br_mem)
2384 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2386 /* output imask descriptor if necessary: */
2387 if (ptr->r.record.r.mask.i)
2388 output_P4_format (f, ptr->r.record.r.mask.i,
2389 ptr->r.record.r.imask_size);
2393 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2397 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2398 ptr->r.record.p.size);
2411 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2414 output_P3_format (f, rp_br, ptr->r.record.p.br);
2417 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2425 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2434 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2444 case bspstore_sprel:
2446 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2449 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2452 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2455 as_bad ("spill_mask record unimplemented.");
2457 case priunat_when_gr:
2458 case priunat_when_mem:
2462 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2464 case priunat_psprel:
2466 case bspstore_psprel:
2468 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2471 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2474 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2478 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2481 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2482 ptr->r.record.x.reg, ptr->r.record.x.t,
2483 ptr->r.record.x.pspoff);
2486 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2487 ptr->r.record.x.reg, ptr->r.record.x.t,
2488 ptr->r.record.x.spoff);
2491 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2492 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2493 ptr->r.record.x.treg, ptr->r.record.x.t);
2495 case spill_psprel_p:
2496 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2497 ptr->r.record.x.ab, ptr->r.record.x.reg,
2498 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2501 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2502 ptr->r.record.x.ab, ptr->r.record.x.reg,
2503 ptr->r.record.x.t, ptr->r.record.x.spoff);
2506 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2507 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2508 ptr->r.record.x.xy, ptr->r.record.x.treg,
2512 as_bad ("record type is not valid");
2517 /* Given a unw_rec_list list, process all the records with
2518 the specified function. */
2520 process_unw_records (list, f)
2525 for (ptr = list; ptr; ptr = ptr->next)
2526 process_one_record (ptr, f);
2529 /* Determine the size of a record list in bytes. */
2531 calc_record_size (list)
2535 process_unw_records (list, count_output);
2539 /* Update IMASK bitmask to reflect the fact that one or more registers
2540 of type TYPE are saved starting at instruction with index T. If N
2541 bits are set in REGMASK, it is assumed that instructions T through
2542 T+N-1 save these registers.
2546 1: instruction saves next fp reg
2547 2: instruction saves next general reg
2548 3: instruction saves next branch reg */
2550 set_imask (region, regmask, t, type)
2551 unw_rec_list *region;
2552 unsigned long regmask;
2556 unsigned char *imask;
2557 unsigned long imask_size;
2561 imask = region->r.record.r.mask.i;
2562 imask_size = region->r.record.r.imask_size;
2565 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2566 imask = xmalloc (imask_size);
2567 memset (imask, 0, imask_size);
2569 region->r.record.r.imask_size = imask_size;
2570 region->r.record.r.mask.i = imask;
2574 pos = 2 * (3 - t % 4);
2577 if (i >= imask_size)
2579 as_bad ("Ignoring attempt to spill beyond end of region");
2583 imask[i] |= (type & 0x3) << pos;
2585 regmask &= (regmask - 1);
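/* Worked example (illustrative only): each instruction slot is described by
   a 2-bit field, four slots per imask byte, most significant bits first.
   For a slot with t == 5 saving a general register (type == 2), pos is
   2 * (3 - 5 % 4) == 4, so the byte covering that slot gets 2 << 4 or'd
   into it.  */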
2595 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2596 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2597 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2601 slot_index (slot_addr, slot_frag, first_addr, first_frag, before_relax)
2602 unsigned long slot_addr;
2604 unsigned long first_addr;
2608 unsigned long index = 0;
2610 /* The first time we are called, the initial address and frag are invalid. */
2611 if (first_addr == 0)
2614 /* If the two addresses are in different frags, then we need to add in
2615 the remaining size of this frag, and then the entire size of intermediate
2617 while (slot_frag != first_frag)
2619 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2623 /* We can get the final addresses only during and after
2625 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2626 index += 3 * ((first_frag->fr_next->fr_address
2627 - first_frag->fr_address
2628 - first_frag->fr_fix) >> 4);
2631 /* We don't know what the final addresses will be. We try our
2632 best to estimate. */
2633 switch (first_frag->fr_type)
2639 as_fatal ("only constant space allocation is supported");
2645 /* Take alignment into account. Assume the worst case
2646 before relaxation. */
2647 index += 3 * ((1 << first_frag->fr_offset) >> 4);
2651 if (first_frag->fr_symbol)
2653 as_fatal ("only constant offsets are supported");
2657 index += 3 * (first_frag->fr_offset >> 4);
2661 /* Add in the full size of the frag converted to instruction slots. */
2662 index += 3 * (first_frag->fr_fix >> 4);
2663 /* Subtract away the initial part before first_addr. */
2664 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2665 + ((first_addr & 0x3) - (start_addr & 0x3)));
2667 /* Move to the beginning of the next frag. */
2668 first_frag = first_frag->fr_next;
2669 first_addr = (unsigned long) &first_frag->fr_literal;
2672 /* Add in the used part of the last frag. */
2673 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2674 + ((slot_addr & 0x3) - (first_addr & 0x3)));
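  /* Worked example (illustrative): a 16-byte bundle holds three slots and
     the low two bits of a slot address carry the slot number, so stepping
     from slot 1 of one bundle to slot 0 of the next bundle yields
     3 * 1 + (0 - 1) = 2 slots from the expression above.  */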
2678 /* Optimize unwind record directives. */
2680 static unw_rec_list *
2681 optimize_unw_records (list)
2687 /* If the only unwind record is ".prologue" or ".prologue" followed
2688 by ".body", then we can optimize the unwind directives away. */
2689 if (list->r.type == prologue
2690 && (list->next->r.type == endp
2691 || (list->next->r.type == body && list->next->next->r.type == endp)))
2697 /* Given a complete record list, process any records which have
2698 unresolved fields (i.e., length counts for a prologue). After
2699 this has been run, all necessary information should be available
2700 within each record to generate an image. */
2703 fixup_unw_records (list, before_relax)
2707 unw_rec_list *ptr, *region = 0;
2708 unsigned long first_addr = 0, rlen = 0, t;
2709 fragS *first_frag = 0;
2711 for (ptr = list; ptr; ptr = ptr->next)
2713 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2714 as_bad (" Insn slot not set in unwind record.");
2715 t = slot_index (ptr->slot_number, ptr->slot_frag,
2716 first_addr, first_frag, before_relax);
2717 switch (ptr->r.type)
2725 unsigned long last_addr = 0;
2726 fragS *last_frag = NULL;
2728 first_addr = ptr->slot_number;
2729 first_frag = ptr->slot_frag;
2730 /* Find either the next body/prologue start, or the end of
2731 the function, and determine the size of the region. */
2732 for (last = ptr->next; last != NULL; last = last->next)
2733 if (last->r.type == prologue || last->r.type == prologue_gr
2734 || last->r.type == body || last->r.type == endp)
2736 last_addr = last->slot_number;
2737 last_frag = last->slot_frag;
2740 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2742 rlen = ptr->r.record.r.rlen = size;
2743 if (ptr->r.type == body)
2744 /* End of region. */
2752 ptr->r.record.b.t = rlen - 1 - t;
2754 /* This happens when a memory-stack-less procedure uses a
2755 ".restore sp" directive at the end of a region to pop
2757 ptr->r.record.b.t = 0;
2768 case priunat_when_gr:
2769 case priunat_when_mem:
2773 ptr->r.record.p.t = t;
2781 case spill_psprel_p:
2782 ptr->r.record.x.t = t;
2788 as_bad ("frgr_mem record before region record!\n");
2791 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2792 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2793 set_imask (region, ptr->r.record.p.frmask, t, 1);
2794 set_imask (region, ptr->r.record.p.grmask, t, 2);
2799 as_bad ("fr_mem record before region record!\n");
2802 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2803 set_imask (region, ptr->r.record.p.rmask, t, 1);
2808 as_bad ("gr_mem record before region record!\n");
2811 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2812 set_imask (region, ptr->r.record.p.rmask, t, 2);
2817 as_bad ("br_mem record before region record!\n");
2820 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2821 set_imask (region, ptr->r.record.p.brmask, t, 3);
2827 as_bad ("gr_gr record before region record!\n");
2830 set_imask (region, ptr->r.record.p.grmask, t, 2);
2835 as_bad ("br_gr record before region record!\n");
2838 set_imask (region, ptr->r.record.p.brmask, t, 3);
2847 /* Estimate the size of a frag before relaxing. We only have one type of frag
2848 to handle here, which is the unwind info frag. */
2851 ia64_estimate_size_before_relax (fragS *frag,
2852 asection *segtype ATTRIBUTE_UNUSED)
2857 /* ??? This code is identical to the first part of ia64_convert_frag. */
2858 list = (unw_rec_list *) frag->fr_opcode;
2859 fixup_unw_records (list, 0);
2861 len = calc_record_size (list);
2862 /* pad to pointer-size boundary. */
2863 pad = len % md.pointer_size;
2865 len += md.pointer_size - pad;
2866 /* Add 8 for the header + a pointer for the personality offset. */
2867 size = len + 8 + md.pointer_size;
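  /* For example (illustrative): 22 bytes of descriptors with an 8-byte
     pointer size pad out to len = 24, so the frag needs
     24 + 8 + 8 = 40 bytes: the 8-byte header, the padded descriptor
     area, and one pointer-sized slot for the personality offset.  */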
2869 /* fr_var carries the max_chars that we created the fragment with.
2870 We must, of course, have allocated enough memory earlier. */
2871 assert (frag->fr_var >= size);
2873 return frag->fr_fix + size;
2876 /* This function converts an rs_machine_dependent variant frag into a
2877 normal fill frag with the unwind image from the record list. */
2879 ia64_convert_frag (fragS *frag)
2885 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2886 list = (unw_rec_list *) frag->fr_opcode;
2887 fixup_unw_records (list, 0);
2889 len = calc_record_size (list);
2890 /* pad to pointer-size boundary. */
2891 pad = len % md.pointer_size;
2893 len += md.pointer_size - pad;
2894 /* Add 8 for the header + a pointer for the personality offset. */
2895 size = len + 8 + md.pointer_size;
2897 /* fr_var carries the max_chars that we created the fragment with.
2898 We must, of course, have allocated enough memory earlier. */
2899 assert (frag->fr_var >= size);
2901 /* Initialize the header area. fr_offset is initialized with
2902 unwind.personality_routine. */
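  /* Rough sketch of the 8-byte header assembled below: the version
     number sits in bits 48 and up, the handler/ABI flag bits are
     contributed by flag_value from bit 32 up, and the low bits hold the
     length of the descriptor area counted in pointer-size words.  */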
2903 if (frag->fr_offset)
2905 if (md.flags & EF_IA_64_ABI64)
2906 flag_value = (bfd_vma) 3 << 32;
2908 /* 32-bit unwind info block. */
2909 flag_value = (bfd_vma) 0x1003 << 32;
2914 md_number_to_chars (frag->fr_literal,
2915 (((bfd_vma) 1 << 48) /* Version. */
2916 | flag_value /* U & E handler flags. */
2917 | (len / md.pointer_size)), /* Length. */
2920 /* Skip the header. */
2921 vbyte_mem_ptr = frag->fr_literal + 8;
2922 process_unw_records (list, output_vbyte_mem);
2924 /* Fill the padding bytes with zeros. */
2926 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
2927 md.pointer_size - pad);
2929 frag->fr_fix += size;
2930 frag->fr_type = rs_fill;
2932 frag->fr_offset = 0;
2936 convert_expr_to_ab_reg (e, ab, regp)
2943 if (e->X_op != O_register)
2946 reg = e->X_add_number;
2947 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2950 *regp = reg - REG_GR;
2952 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2953 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2956 *regp = reg - REG_FR;
2958 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
2961 *regp = reg - REG_BR;
2968 case REG_PR: *regp = 0; break;
2969 case REG_PSP: *regp = 1; break;
2970 case REG_PRIUNAT: *regp = 2; break;
2971 case REG_BR + 0: *regp = 3; break;
2972 case REG_AR + AR_BSP: *regp = 4; break;
2973 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2974 case REG_AR + AR_RNAT: *regp = 6; break;
2975 case REG_AR + AR_UNAT: *regp = 7; break;
2976 case REG_AR + AR_FPSR: *regp = 8; break;
2977 case REG_AR + AR_PFS: *regp = 9; break;
2978 case REG_AR + AR_LC: *regp = 10; break;
2988 convert_expr_to_xy_reg (e, xy, regp)
2995 if (e->X_op != O_register)
2998 reg = e->X_add_number;
3000 if (/* reg >= REG_GR && */ reg <= (REG_GR + 127))
3003 *regp = reg - REG_GR;
3005 else if (reg >= REG_FR && reg <= (REG_FR + 127))
3008 *regp = reg - REG_FR;
3010 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3013 *regp = reg - REG_BR;
3023 /* The current frag is an alignment frag. */
3024 align_frag = frag_now;
3025 s_align_bytes (arg);
3030 int dummy ATTRIBUTE_UNUSED;
3035 radix = *input_line_pointer++;
3037 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
3039 as_bad ("Radix `%c' unsupported", *input_line_pointer);
3040 ignore_rest_of_line ();
3045 /* Helper function for .loc directives. If the assembler is not generating
3046 line number info, then we need to remember which instructions have a .loc
3047 directive, and only call dwarf2_gen_line_info for those instructions. */
3052 CURR_SLOT.loc_directive_seen = 1;
3053 dwarf2_directive_loc (x);
3056 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3058 dot_special_section (which)
3061 set_section ((char *) special_section_name[which]);
3065 add_unwind_entry (ptr)
3069 unwind.tail->next = ptr;
3074 /* The current entry can in fact be a chain of unwind entries. */
3075 if (unwind.current_entry == NULL)
3076 unwind.current_entry = ptr;
3081 int dummy ATTRIBUTE_UNUSED;
3087 if (e.X_op != O_constant)
3088 as_bad ("Operand to .fframe must be a constant");
3090 add_unwind_entry (output_mem_stack_f (e.X_add_number));
3095 int dummy ATTRIBUTE_UNUSED;
3101 reg = e.X_add_number - REG_GR;
3102 if (e.X_op == O_register && reg < 128)
3104 add_unwind_entry (output_mem_stack_v ());
3105 if (! (unwind.prologue_mask & 2))
3106 add_unwind_entry (output_psp_gr (reg));
3109 as_bad ("First operand to .vframe must be a general register");
3113 dot_vframesp (dummy)
3114 int dummy ATTRIBUTE_UNUSED;
3119 if (e.X_op == O_constant)
3121 add_unwind_entry (output_mem_stack_v ());
3122 add_unwind_entry (output_psp_sprel (e.X_add_number));
3125 as_bad ("Operand to .vframesp must be a constant (sp-relative offset)");
3129 dot_vframepsp (dummy)
3130 int dummy ATTRIBUTE_UNUSED;
3135 if (e.X_op == O_constant)
3137 add_unwind_entry (output_mem_stack_v ());
3138 add_unwind_entry (output_psp_sprel (e.X_add_number));
3141 as_bad ("Operand to .vframepsp must be a constant (psp-relative offset)");
3146 int dummy ATTRIBUTE_UNUSED;
3152 sep = parse_operand (&e1);
3154 as_bad ("No second operand to .save");
3155 sep = parse_operand (&e2);
3157 reg1 = e1.X_add_number;
3158 reg2 = e2.X_add_number - REG_GR;
3160 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3161 if (e1.X_op == O_register)
3163 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
3167 case REG_AR + AR_BSP:
3168 add_unwind_entry (output_bsp_when ());
3169 add_unwind_entry (output_bsp_gr (reg2));
3171 case REG_AR + AR_BSPSTORE:
3172 add_unwind_entry (output_bspstore_when ());
3173 add_unwind_entry (output_bspstore_gr (reg2));
3175 case REG_AR + AR_RNAT:
3176 add_unwind_entry (output_rnat_when ());
3177 add_unwind_entry (output_rnat_gr (reg2));
3179 case REG_AR + AR_UNAT:
3180 add_unwind_entry (output_unat_when ());
3181 add_unwind_entry (output_unat_gr (reg2));
3183 case REG_AR + AR_FPSR:
3184 add_unwind_entry (output_fpsr_when ());
3185 add_unwind_entry (output_fpsr_gr (reg2));
3187 case REG_AR + AR_PFS:
3188 add_unwind_entry (output_pfs_when ());
3189 if (! (unwind.prologue_mask & 4))
3190 add_unwind_entry (output_pfs_gr (reg2));
3192 case REG_AR + AR_LC:
3193 add_unwind_entry (output_lc_when ());
3194 add_unwind_entry (output_lc_gr (reg2));
3197 add_unwind_entry (output_rp_when ());
3198 if (! (unwind.prologue_mask & 8))
3199 add_unwind_entry (output_rp_gr (reg2));
3202 add_unwind_entry (output_preds_when ());
3203 if (! (unwind.prologue_mask & 1))
3204 add_unwind_entry (output_preds_gr (reg2));
3207 add_unwind_entry (output_priunat_when_gr ());
3208 add_unwind_entry (output_priunat_gr (reg2));
3211 as_bad ("First operand not a valid register");
3215 as_bad (" Second operand not a valid register");
3218 as_bad ("First operand not a register");
3223 int dummy ATTRIBUTE_UNUSED;
3226 unsigned long ecount; /* # of _additional_ regions to pop */
3229 sep = parse_operand (&e1);
3230 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3232 as_bad ("First operand to .restore must be stack pointer (sp)");
3238 parse_operand (&e2);
3239 if (e2.X_op != O_constant || e2.X_add_number < 0)
3241 as_bad ("Second operand to .restore must be a constant >= 0");
3244 ecount = e2.X_add_number;
3247 ecount = unwind.prologue_count - 1;
3249 if (ecount >= unwind.prologue_count)
3251 as_bad ("Epilogue count of %lu exceeds number of nested prologues (%u)",
3252 ecount + 1, unwind.prologue_count);
3256 add_unwind_entry (output_epilogue (ecount));
3258 if (ecount < unwind.prologue_count)
3259 unwind.prologue_count -= ecount + 1;
3261 unwind.prologue_count = 0;
3265 dot_restorereg (dummy)
3266 int dummy ATTRIBUTE_UNUSED;
3268 unsigned int ab, reg;
3273 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3275 as_bad ("First operand to .restorereg must be a preserved register");
3278 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3282 dot_restorereg_p (dummy)
3283 int dummy ATTRIBUTE_UNUSED;
3285 unsigned int qp, ab, reg;
3289 sep = parse_operand (&e1);
3292 as_bad ("No second operand to .restorereg.p");
3296 parse_operand (&e2);
3298 qp = e1.X_add_number - REG_P;
3299 if (e1.X_op != O_register || qp > 63)
3301 as_bad ("First operand to .restorereg.p must be a predicate");
3305 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3307 as_bad ("Second operand to .restorereg.p must be a preserved register");
3310 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3313 static const char *special_linkonce_name[] =
3315 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3319 start_unwind_section (const segT text_seg, int sec_index, int linkonce_empty)
3322 Use a slightly ugly scheme to derive the unwind section names from
3323 the text section name:
3325 text sect. unwind table sect.
3326 name: name: comments:
3327 ---------- ----------------- --------------------------------
3329 .text.foo .IA_64.unwind.text.foo
3330 .foo .IA_64.unwind.foo
3332 .gnu.linkonce.ia64unw.foo
3333 _info .IA_64.unwind_info gas issues error message (ditto)
3334 _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto)
3336 This mapping is done so that:
3338 (a) An object file with unwind info only in .text will use
3339 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3340 This follows the letter of the ABI and also ensures backwards
3341 compatibility with older toolchains.
3343 (b) An object file with unwind info in multiple text sections
3344 will use separate unwind sections for each text section.
3345 This allows us to properly set the "sh_info" and "sh_link"
3346 fields in SHT_IA_64_UNWIND as required by the ABI and also
3347 lets GNU ld support programs with multiple segments
3348 containing unwind info (as might be the case for certain
3349 embedded applications).
3351 (c) An error is issued if there would be a name clash.
3354 const char *text_name, *sec_text_name;
3356 const char *prefix = special_section_name [sec_index];
3358 size_t prefix_len, suffix_len, sec_name_len;
3360 sec_text_name = segment_name (text_seg);
3361 text_name = sec_text_name;
3362 if (strncmp (text_name, "_info", 5) == 0)
3364 as_bad ("Illegal section name `%s' (causes unwind section name clash)",
3366 ignore_rest_of_line ();
3369 if (strcmp (text_name, ".text") == 0)
3372 /* Build the unwind section name by appending the (possibly stripped)
3373 text section name to the unwind prefix. */
3375 if (strncmp (text_name, ".gnu.linkonce.t.",
3376 sizeof (".gnu.linkonce.t.") - 1) == 0)
3378 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3379 suffix += sizeof (".gnu.linkonce.t.") - 1;
3381 else if (linkonce_empty)
3384 prefix_len = strlen (prefix);
3385 suffix_len = strlen (suffix);
3386 sec_name_len = prefix_len + suffix_len;
3387 sec_name = alloca (sec_name_len + 1);
3388 memcpy (sec_name, prefix, prefix_len);
3389 memcpy (sec_name + prefix_len, suffix, suffix_len);
3390 sec_name [sec_name_len] = '\0';
3392 /* Handle COMDAT group. */
3393 if (suffix == text_name && (text_seg->flags & SEC_LINK_ONCE) != 0)
3396 size_t len, group_name_len;
3397 const char *group_name = elf_group_name (text_seg);
3399 if (group_name == NULL)
3401 as_bad ("Group section `%s' has no group signature",
3403 ignore_rest_of_line ();
3406 /* We have to construct a fake section directive. */
3407 group_name_len = strlen (group_name);
3409 + 16 /* ,"aG",@progbits, */
3410 + group_name_len /* ,group_name */
3413 section = alloca (len + 1);
3414 memcpy (section, sec_name, sec_name_len);
3415 memcpy (section + sec_name_len, ",\"aG\",@progbits,", 16);
3416 memcpy (section + sec_name_len + 16, group_name, group_name_len);
3417 memcpy (section + len - 7, ",comdat", 7);
3418 section [len] = '\0';
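      /* For instance (illustrative), unwind info for a COMDAT text section
         ".text.foo" whose group signature is "foo" passes something like

             .IA_64.unwind.text.foo,"aG",@progbits,foo,comdat

         to set_section below, mimicking a hand-written .section directive.  */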
3419 set_section (section);
3423 set_section (sec_name);
3424 bfd_set_section_flags (stdoutput, now_seg,
3425 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3428 elf_linked_to_section (now_seg) = text_seg;
3432 generate_unwind_image (const segT text_seg)
3437 /* Mark the end of the unwind info, so that we can compute the size of the
3438 last unwind region. */
3439 add_unwind_entry (output_endp ());
3441 /* Force out pending instructions, to make sure all unwind records have
3442 a valid slot_number field. */
3443 ia64_flush_insns ();
3445 /* Generate the unwind record. */
3446 list = optimize_unw_records (unwind.list);
3447 fixup_unw_records (list, 1);
3448 size = calc_record_size (list);
3450 if (size > 0 || unwind.force_unwind_entry)
3452 unwind.force_unwind_entry = 0;
3453 /* pad to pointer-size boundary. */
3454 pad = size % md.pointer_size;
3456 size += md.pointer_size - pad;
3457 /* Add 8 for the header + a pointer for the personality
3459 size += 8 + md.pointer_size;
3462 /* If there are unwind records, switch sections, and output the info. */
3466 bfd_reloc_code_real_type reloc;
3468 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO, 0);
3470 /* Make sure the section has 4 byte alignment for ILP32 and
3471 8 byte alignment for LP64. */
3472 frag_align (md.pointer_size_shift, 0, 0);
3473 record_alignment (now_seg, md.pointer_size_shift);
3475 /* Set expression which points to start of unwind descriptor area. */
3476 unwind.info = expr_build_dot ();
3478 frag_var (rs_machine_dependent, size, size, 0, 0,
3479 (offsetT) (long) unwind.personality_routine,
3482 /* Add the personality address to the image. */
3483 if (unwind.personality_routine != 0)
3485 exp.X_op = O_symbol;
3486 exp.X_add_symbol = unwind.personality_routine;
3487 exp.X_add_number = 0;
3489 if (md.flags & EF_IA_64_BE)
3491 if (md.flags & EF_IA_64_ABI64)
3492 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3494 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3498 if (md.flags & EF_IA_64_ABI64)
3499 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3501 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3504 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3505 md.pointer_size, &exp, 0, reloc);
3506 unwind.personality_routine = 0;
3510 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO, 1);
3512 free_saved_prologue_counts ();
3513 unwind.list = unwind.tail = unwind.current_entry = NULL;
3517 dot_handlerdata (dummy)
3518 int dummy ATTRIBUTE_UNUSED;
3520 unwind.force_unwind_entry = 1;
3522 /* Remember which segment we're in so we can switch back after .endp */
3523 unwind.saved_text_seg = now_seg;
3524 unwind.saved_text_subseg = now_subseg;
3526 /* Generate unwind info into unwind-info section and then leave that
3527 section as the currently active one so dataXX directives go into
3528 the language specific data area of the unwind info block. */
3529 generate_unwind_image (now_seg);
3530 demand_empty_rest_of_line ();
3534 dot_unwentry (dummy)
3535 int dummy ATTRIBUTE_UNUSED;
3537 unwind.force_unwind_entry = 1;
3538 demand_empty_rest_of_line ();
3543 int dummy ATTRIBUTE_UNUSED;
3549 reg = e.X_add_number - REG_BR;
3550 if (e.X_op == O_register && reg < 8)
3551 add_unwind_entry (output_rp_br (reg));
3553 as_bad ("First operand not a valid branch register");
3557 dot_savemem (psprel)
3564 sep = parse_operand (&e1);
3566 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3567 sep = parse_operand (&e2);
3569 reg1 = e1.X_add_number;
3570 val = e2.X_add_number;
3572 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3573 if (e1.X_op == O_register)
3575 if (e2.X_op == O_constant)
3579 case REG_AR + AR_BSP:
3580 add_unwind_entry (output_bsp_when ());
3581 add_unwind_entry ((psprel
3583 : output_bsp_sprel) (val));
3585 case REG_AR + AR_BSPSTORE:
3586 add_unwind_entry (output_bspstore_when ());
3587 add_unwind_entry ((psprel
3588 ? output_bspstore_psprel
3589 : output_bspstore_sprel) (val));
3591 case REG_AR + AR_RNAT:
3592 add_unwind_entry (output_rnat_when ());
3593 add_unwind_entry ((psprel
3594 ? output_rnat_psprel
3595 : output_rnat_sprel) (val));
3597 case REG_AR + AR_UNAT:
3598 add_unwind_entry (output_unat_when ());
3599 add_unwind_entry ((psprel
3600 ? output_unat_psprel
3601 : output_unat_sprel) (val));
3603 case REG_AR + AR_FPSR:
3604 add_unwind_entry (output_fpsr_when ());
3605 add_unwind_entry ((psprel
3606 ? output_fpsr_psprel
3607 : output_fpsr_sprel) (val));
3609 case REG_AR + AR_PFS:
3610 add_unwind_entry (output_pfs_when ());
3611 add_unwind_entry ((psprel
3613 : output_pfs_sprel) (val));
3615 case REG_AR + AR_LC:
3616 add_unwind_entry (output_lc_when ());
3617 add_unwind_entry ((psprel
3619 : output_lc_sprel) (val));
3622 add_unwind_entry (output_rp_when ());
3623 add_unwind_entry ((psprel
3625 : output_rp_sprel) (val));
3628 add_unwind_entry (output_preds_when ());
3629 add_unwind_entry ((psprel
3630 ? output_preds_psprel
3631 : output_preds_sprel) (val));
3634 add_unwind_entry (output_priunat_when_mem ());
3635 add_unwind_entry ((psprel
3636 ? output_priunat_psprel
3637 : output_priunat_sprel) (val));
3640 as_bad ("First operand not a valid register");
3644 as_bad (" Second operand not a valid constant");
3647 as_bad ("First operand not a register");
3652 int dummy ATTRIBUTE_UNUSED;
3656 sep = parse_operand (&e1);
3658 parse_operand (&e2);
3660 if (e1.X_op != O_constant)
3661 as_bad ("First operand to .save.g must be a constant.");
3664 int grmask = e1.X_add_number;
3666 add_unwind_entry (output_gr_mem (grmask));
3669 int reg = e2.X_add_number - REG_GR;
3670 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3671 add_unwind_entry (output_gr_gr (grmask, reg));
3673 as_bad ("Second operand is an invalid register.");
3680 int dummy ATTRIBUTE_UNUSED;
3684 sep = parse_operand (&e1);
3686 if (e1.X_op != O_constant)
3687 as_bad ("Operand to .save.f must be a constant.");
3689 add_unwind_entry (output_fr_mem (e1.X_add_number));
3694 int dummy ATTRIBUTE_UNUSED;
3701 sep = parse_operand (&e1);
3702 if (e1.X_op != O_constant)
3704 as_bad ("First operand to .save.b must be a constant.");
3707 brmask = e1.X_add_number;
3711 sep = parse_operand (&e2);
3712 reg = e2.X_add_number - REG_GR;
3713 if (e2.X_op != O_register || reg > 127)
3715 as_bad ("Second operand to .save.b must be a general register.");
3718 add_unwind_entry (output_br_gr (brmask, reg));
3721 add_unwind_entry (output_br_mem (brmask));
3723 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3724 demand_empty_rest_of_line ();
3729 int dummy ATTRIBUTE_UNUSED;
3733 sep = parse_operand (&e1);
3735 parse_operand (&e2);
3737 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3738 as_bad ("Both operands of .save.gf must be constants.");
3741 int grmask = e1.X_add_number;
3742 int frmask = e2.X_add_number;
3743 add_unwind_entry (output_frgr_mem (grmask, frmask));
3749 int dummy ATTRIBUTE_UNUSED;
3754 sep = parse_operand (&e);
3755 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3756 demand_empty_rest_of_line ();
3758 if (e.X_op != O_constant)
3759 as_bad ("Operand to .spill must be a constant");
3761 add_unwind_entry (output_spill_base (e.X_add_number));
3765 dot_spillreg (dummy)
3766 int dummy ATTRIBUTE_UNUSED;
3768 int sep, ab, xy, reg, treg;
3771 sep = parse_operand (&e1);
3774 as_bad ("No second operand to .spillreg");
3778 parse_operand (&e2);
3780 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3782 as_bad ("First operand to .spillreg must be a preserved register");
3786 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3788 as_bad ("Second operand to .spillreg must be a register");
3792 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3796 dot_spillmem (psprel)
3802 sep = parse_operand (&e1);
3805 as_bad ("Second operand missing");
3809 parse_operand (&e2);
3811 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3813 as_bad ("First operand to .spill%s must be a preserved register",
3814 psprel ? "psp" : "sp");
3818 if (e2.X_op != O_constant)
3820 as_bad ("Second operand to .spill%s must be a constant",
3821 psprel ? "psp" : "sp");
3826 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
3828 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
3832 dot_spillreg_p (dummy)
3833 int dummy ATTRIBUTE_UNUSED;
3835 int sep, ab, xy, reg, treg;
3836 expressionS e1, e2, e3;
3839 sep = parse_operand (&e1);
3842 as_bad ("No second and third operand to .spillreg.p");
3846 sep = parse_operand (&e2);
3849 as_bad ("No third operand to .spillreg.p");
3853 parse_operand (&e3);
3855 qp = e1.X_add_number - REG_P;
3857 if (e1.X_op != O_register || qp > 63)
3859 as_bad ("First operand to .spillreg.p must be a predicate");
3863 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3865 as_bad ("Second operand to .spillreg.p must be a preserved register");
3869 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
3871 as_bad ("Third operand to .spillreg.p must be a register");
3875 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
3879 dot_spillmem_p (psprel)
3882 expressionS e1, e2, e3;
3886 sep = parse_operand (&e1);
3889 as_bad ("Second operand missing");
3893 sep = parse_operand (&e2);
3896 as_bad ("Third operand missing");
3900 parse_operand (&e3);
3902 qp = e1.X_add_number - REG_P;
3903 if (e1.X_op != O_register || qp > 63)
3905 as_bad ("First operand to .spill%s_p must be a predicate",
3906 psprel ? "psp" : "sp");
3910 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3912 as_bad ("Second operand to .spill%s_p must be a preserved register",
3913 psprel ? "psp" : "sp");
3917 if (e3.X_op != O_constant)
3919 as_bad ("Third operand to .spill%s_p must be a constant",
3920 psprel ? "psp" : "sp");
3925 add_unwind_entry (output_spill_psprel_p (ab, reg, e3.X_add_number, qp));
3927 add_unwind_entry (output_spill_sprel_p (ab, reg, e3.X_add_number, qp));
3931 get_saved_prologue_count (lbl)
3934 label_prologue_count *lpc = unwind.saved_prologue_counts;
3936 while (lpc != NULL && lpc->label_number != lbl)
3940 return lpc->prologue_count;
3942 as_bad ("Missing .label_state %ld", lbl);
3947 save_prologue_count (lbl, count)
3951 label_prologue_count *lpc = unwind.saved_prologue_counts;
3953 while (lpc != NULL && lpc->label_number != lbl)
3957 lpc->prologue_count = count;
3960 label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc));
3962 new_lpc->next = unwind.saved_prologue_counts;
3963 new_lpc->label_number = lbl;
3964 new_lpc->prologue_count = count;
3965 unwind.saved_prologue_counts = new_lpc;
3970 free_saved_prologue_counts ()
3972 label_prologue_count *lpc = unwind.saved_prologue_counts;
3973 label_prologue_count *next;
3982 unwind.saved_prologue_counts = NULL;
3986 dot_label_state (dummy)
3987 int dummy ATTRIBUTE_UNUSED;
3992 if (e.X_op != O_constant)
3994 as_bad ("Operand to .label_state must be a constant");
3997 add_unwind_entry (output_label_state (e.X_add_number));
3998 save_prologue_count (e.X_add_number, unwind.prologue_count);
4002 dot_copy_state (dummy)
4003 int dummy ATTRIBUTE_UNUSED;
4008 if (e.X_op != O_constant)
4010 as_bad ("Operand to .copy_state must be a constant");
4013 add_unwind_entry (output_copy_state (e.X_add_number));
4014 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4019 int dummy ATTRIBUTE_UNUSED;
4024 sep = parse_operand (&e1);
4027 as_bad ("Second operand to .unwabi missing");
4030 sep = parse_operand (&e2);
4031 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
4032 demand_empty_rest_of_line ();
4034 if (e1.X_op != O_constant)
4036 as_bad ("First operand to .unwabi must be a constant");
4040 if (e2.X_op != O_constant)
4042 as_bad ("Second operand to .unwabi must be a constant");
4046 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
4050 dot_personality (dummy)
4051 int dummy ATTRIBUTE_UNUSED;
4055 name = input_line_pointer;
4056 c = get_symbol_end ();
4057 p = input_line_pointer;
4058 unwind.personality_routine = symbol_find_or_make (name);
4059 unwind.force_unwind_entry = 1;
4062 demand_empty_rest_of_line ();
4067 int dummy ATTRIBUTE_UNUSED;
4072 unwind.proc_start = expr_build_dot ();
4073 /* Parse names of main and alternate entry points and mark them as
4074 function symbols: */
4078 name = input_line_pointer;
4079 c = get_symbol_end ();
4080 p = input_line_pointer;
4081 sym = symbol_find_or_make (name);
4082 if (unwind.proc_start == 0)
4084 unwind.proc_start = sym;
4086 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4089 if (*input_line_pointer != ',')
4091 ++input_line_pointer;
4093 demand_empty_rest_of_line ();
4096 unwind.prologue_count = 0;
4097 unwind.list = unwind.tail = unwind.current_entry = NULL;
4098 unwind.personality_routine = 0;
4103 int dummy ATTRIBUTE_UNUSED;
4105 unwind.prologue = 0;
4106 unwind.prologue_mask = 0;
4108 add_unwind_entry (output_body ());
4109 demand_empty_rest_of_line ();
4113 dot_prologue (dummy)
4114 int dummy ATTRIBUTE_UNUSED;
4117 int mask = 0, grsave = 0;
4119 if (!is_it_end_of_statement ())
4122 sep = parse_operand (&e1);
4124 as_bad ("No second operand to .prologue");
4125 sep = parse_operand (&e2);
4126 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
4127 demand_empty_rest_of_line ();
4129 if (e1.X_op == O_constant)
4131 mask = e1.X_add_number;
4133 if (e2.X_op == O_constant)
4134 grsave = e2.X_add_number;
4135 else if (e2.X_op == O_register
4136 && (grsave = e2.X_add_number - REG_GR) < 128)
4139 as_bad ("Second operand not a constant or general register");
4141 add_unwind_entry (output_prologue_gr (mask, grsave));
4144 as_bad ("First operand not a constant");
4147 add_unwind_entry (output_prologue ());
4149 unwind.prologue = 1;
4150 unwind.prologue_mask = mask;
4151 ++unwind.prologue_count;
4156 int dummy ATTRIBUTE_UNUSED;
4160 int bytes_per_address;
4163 subsegT saved_subseg;
4167 if (unwind.saved_text_seg)
4169 saved_seg = unwind.saved_text_seg;
4170 saved_subseg = unwind.saved_text_subseg;
4171 unwind.saved_text_seg = NULL;
4175 saved_seg = now_seg;
4176 saved_subseg = now_subseg;
4179 insn_group_break (1, 0, 0);
4181 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4183 generate_unwind_image (saved_seg);
4185 if (unwind.info || unwind.force_unwind_entry)
4187 subseg_set (md.last_text_seg, 0);
4188 unwind.proc_end = expr_build_dot ();
4190 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND, 0);
4192 /* Make sure that section has 4 byte alignment for ILP32 and
4193 8 byte alignment for LP64. */
4194 record_alignment (now_seg, md.pointer_size_shift);
4196 /* Need space for 3 pointers for procedure start, procedure end,
4198 ptr = frag_more (3 * md.pointer_size);
4199 where = frag_now_fix () - (3 * md.pointer_size);
4200 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
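      /* Sketch of the unwind table entry built below: three address-sized
         words { proc start, proc end, unwind info }, each emitted through a
         FUNC_SEG_RELATIVE pseudo-fixup so the linker resolves them as
         segment-relative values.  */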
4202 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4203 e.X_op = O_pseudo_fixup;
4204 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4206 e.X_add_symbol = unwind.proc_start;
4207 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
4209 e.X_op = O_pseudo_fixup;
4210 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4212 e.X_add_symbol = unwind.proc_end;
4213 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4214 bytes_per_address, &e);
4218 e.X_op = O_pseudo_fixup;
4219 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4221 e.X_add_symbol = unwind.info;
4222 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4223 bytes_per_address, &e);
4226 md_number_to_chars (ptr + (bytes_per_address * 2), 0,
4231 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND, 1);
4233 subseg_set (saved_seg, saved_subseg);
4235 /* Parse names of main and alternate entry points and set symbol sizes. */
4239 name = input_line_pointer;
4240 c = get_symbol_end ();
4241 p = input_line_pointer;
4242 sym = symbol_find (name);
4243 if (sym && unwind.proc_start
4244 && (symbol_get_bfdsym (sym)->flags & BSF_FUNCTION)
4245 && S_GET_SIZE (sym) == 0 && symbol_get_obj (sym)->size == NULL)
4247 fragS *fr = symbol_get_frag (unwind.proc_start);
4248 fragS *frag = symbol_get_frag (sym);
4250 /* Check whether the function label is at or beyond last
4252 while (fr && fr != frag)
4256 if (frag == frag_now && SEG_NORMAL (now_seg))
4257 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4260 symbol_get_obj (sym)->size =
4261 (expressionS *) xmalloc (sizeof (expressionS));
4262 symbol_get_obj (sym)->size->X_op = O_subtract;
4263 symbol_get_obj (sym)->size->X_add_symbol
4264 = symbol_new (FAKE_LABEL_NAME, now_seg,
4265 frag_now_fix (), frag_now);
4266 symbol_get_obj (sym)->size->X_op_symbol = sym;
4267 symbol_get_obj (sym)->size->X_add_number = 0;
4273 if (*input_line_pointer != ',')
4275 ++input_line_pointer;
4277 demand_empty_rest_of_line ();
4278 unwind.proc_start = unwind.proc_end = unwind.info = 0;
4282 dot_template (template)
4285 CURR_SLOT.user_template = template;
4290 int dummy ATTRIBUTE_UNUSED;
4292 int ins, locs, outs, rots;
4294 if (is_it_end_of_statement ())
4295 ins = locs = outs = rots = 0;
4298 ins = get_absolute_expression ();
4299 if (*input_line_pointer++ != ',')
4301 locs = get_absolute_expression ();
4302 if (*input_line_pointer++ != ',')
4304 outs = get_absolute_expression ();
4305 if (*input_line_pointer++ != ',')
4307 rots = get_absolute_expression ();
4309 set_regstack (ins, locs, outs, rots);
4313 as_bad ("Comma expected");
4314 ignore_rest_of_line ();
4321 unsigned num_regs, num_alloced = 0;
4322 struct dynreg **drpp, *dr;
4323 int ch, base_reg = 0;
4329 case DYNREG_GR: base_reg = REG_GR + 32; break;
4330 case DYNREG_FR: base_reg = REG_FR + 32; break;
4331 case DYNREG_PR: base_reg = REG_P + 16; break;
4335 /* First, remove existing names from hash table. */
4336 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4338 hash_delete (md.dynreg_hash, dr->name);
4342 drpp = &md.dynreg[type];
4345 start = input_line_pointer;
4346 ch = get_symbol_end ();
4347 *input_line_pointer = ch;
4348 len = (input_line_pointer - start);
4351 if (*input_line_pointer != '[')
4353 as_bad ("Expected '['");
4356 ++input_line_pointer; /* skip '[' */
4358 num_regs = get_absolute_expression ();
4360 if (*input_line_pointer++ != ']')
4362 as_bad ("Expected ']'");
4367 num_alloced += num_regs;
4371 if (num_alloced > md.rot.num_regs)
4373 as_bad ("Used more than the declared %d rotating registers",
4379 if (num_alloced > 96)
4381 as_bad ("Used more than the available 96 rotating registers");
4386 if (num_alloced > 48)
4388 as_bad ("Used more than the available 48 rotating registers");
4397 name = obstack_alloc (&notes, len + 1);
4398 memcpy (name, start, len);
4403 *drpp = obstack_alloc (&notes, sizeof (*dr));
4404 memset (*drpp, 0, sizeof (*dr));
4409 dr->num_regs = num_regs;
4410 dr->base = base_reg;
4412 base_reg += num_regs;
4414 if (hash_insert (md.dynreg_hash, name, dr))
4416 as_bad ("Attempt to redefine register set `%s'", name);
4420 if (*input_line_pointer != ',')
4422 ++input_line_pointer; /* skip comma */
4425 demand_empty_rest_of_line ();
4429 ignore_rest_of_line ();
4433 dot_byteorder (byteorder)
4436 segment_info_type *seginfo = seg_info (now_seg);
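  /* Note: the per-segment endian field used below encodes "not yet set"
     as 0, big-endian as 1 and little-endian as 2; a BYTEORDER argument
     of -1 means "re-apply whatever this section was last set to",
     falling back to the global default.  */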
4438 if (byteorder == -1)
4440 if (seginfo->tc_segment_info_data.endian == 0)
4441 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4442 byteorder = seginfo->tc_segment_info_data.endian == 1;
4445 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4447 if (target_big_endian != byteorder)
4449 target_big_endian = byteorder;
4450 if (target_big_endian)
4452 ia64_number_to_chars = number_to_chars_bigendian;
4453 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4457 ia64_number_to_chars = number_to_chars_littleendian;
4458 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4465 int dummy ATTRIBUTE_UNUSED;
4472 option = input_line_pointer;
4473 ch = get_symbol_end ();
4474 if (strcmp (option, "lsb") == 0)
4475 md.flags &= ~EF_IA_64_BE;
4476 else if (strcmp (option, "msb") == 0)
4477 md.flags |= EF_IA_64_BE;
4478 else if (strcmp (option, "abi32") == 0)
4479 md.flags &= ~EF_IA_64_ABI64;
4480 else if (strcmp (option, "abi64") == 0)
4481 md.flags |= EF_IA_64_ABI64;
4483 as_bad ("Unknown psr option `%s'", option);
4484 *input_line_pointer = ch;
4487 if (*input_line_pointer != ',')
4490 ++input_line_pointer;
4493 demand_empty_rest_of_line ();
4498 int dummy ATTRIBUTE_UNUSED;
4500 new_logical_line (0, get_absolute_expression ());
4501 demand_empty_rest_of_line ();
4505 parse_section_name ()
4511 if (*input_line_pointer != '"')
4513 as_bad ("Missing section name");
4514 ignore_rest_of_line ();
4517 name = demand_copy_C_string (&len);
4520 ignore_rest_of_line ();
4524 if (*input_line_pointer != ',')
4526 as_bad ("Comma expected after section name");
4527 ignore_rest_of_line ();
4530 ++input_line_pointer; /* skip comma */
4538 char *name = parse_section_name ();
4542 md.keep_pending_output = 1;
4545 obj_elf_previous (0);
4546 md.keep_pending_output = 0;
4549 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4552 stmt_float_cons (kind)
4573 ia64_do_align (alignment);
4581 int saved_auto_align = md.auto_align;
4585 md.auto_align = saved_auto_align;
4589 dot_xfloat_cons (kind)
4592 char *name = parse_section_name ();
4596 md.keep_pending_output = 1;
4598 stmt_float_cons (kind);
4599 obj_elf_previous (0);
4600 md.keep_pending_output = 0;
4604 dot_xstringer (zero)
4607 char *name = parse_section_name ();
4611 md.keep_pending_output = 1;
4614 obj_elf_previous (0);
4615 md.keep_pending_output = 0;
4622 int saved_auto_align = md.auto_align;
4623 char *name = parse_section_name ();
4627 md.keep_pending_output = 1;
4631 md.auto_align = saved_auto_align;
4632 obj_elf_previous (0);
4633 md.keep_pending_output = 0;
4637 dot_xfloat_cons_ua (kind)
4640 int saved_auto_align = md.auto_align;
4641 char *name = parse_section_name ();
4645 md.keep_pending_output = 1;
4648 stmt_float_cons (kind);
4649 md.auto_align = saved_auto_align;
4650 obj_elf_previous (0);
4651 md.keep_pending_output = 0;
4654 /* .reg.val <regname>,value */
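/* For example (illustrative):

       .reg.val r14, 0

   records, for DV analysis along the current path, that r14 is known to
   hold zero; annotations on anything but a general register are ignored
   with a warning below.  */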
4658 int dummy ATTRIBUTE_UNUSED;
4663 if (reg.X_op != O_register)
4665 as_bad (_("Register name expected"));
4666 ignore_rest_of_line ();
4668 else if (*input_line_pointer++ != ',')
4670 as_bad (_("Comma expected"));
4671 ignore_rest_of_line ();
4675 valueT value = get_absolute_expression ();
4676 int regno = reg.X_add_number;
4677 if (regno < REG_GR || regno >= REG_GR + 128)
4678 as_warn (_("Register value annotation ignored"));
4681 gr_values[regno - REG_GR].known = 1;
4682 gr_values[regno - REG_GR].value = value;
4683 gr_values[regno - REG_GR].path = md.path;
4686 demand_empty_rest_of_line ();
4691 .serialize.instruction
4694 dot_serialize (type)
4697 insn_group_break (0, 0, 0);
4699 instruction_serialization ();
4701 data_serialization ();
4702 insn_group_break (0, 0, 0);
4703 demand_empty_rest_of_line ();
4706 /* select dv checking mode
4711 A stop is inserted when changing modes
4718 if (md.manual_bundling)
4719 as_warn (_("Directive invalid within a bundle"));
4721 if (type == 'E' || type == 'A')
4722 md.mode_explicitly_set = 0;
4724 md.mode_explicitly_set = 1;
4731 if (md.explicit_mode)
4732 insn_group_break (1, 0, 0);
4733 md.explicit_mode = 0;
4737 if (!md.explicit_mode)
4738 insn_group_break (1, 0, 0);
4739 md.explicit_mode = 1;
4743 if (md.explicit_mode != md.default_explicit_mode)
4744 insn_group_break (1, 0, 0);
4745 md.explicit_mode = md.default_explicit_mode;
4746 md.mode_explicitly_set = 0;
4757 for (regno = 0; regno < 64; regno++)
4759 if (mask & ((valueT) 1 << regno))
4761 fprintf (stderr, "%s p%d", comma, regno);
4768 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear")
4769 .pred.rel.imply p1, p2 (also .pred.rel "imply")
4770 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex")
4771 .pred.safe_across_calls p1 [, p2 [,...]]
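   For example (illustrative), ".pred.rel.mutex p6, p7" tells the DV
   checker that p6 and p7 are never true at the same time, so
   instructions predicated on them cannot conflict with one another.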
4780 int p1 = -1, p2 = -1;
4784 if (*input_line_pointer != '"')
4786 as_bad (_("Missing predicate relation type"));
4787 ignore_rest_of_line ();
4793 char *form = demand_copy_C_string (&len);
4794 if (strcmp (form, "mutex") == 0)
4796 else if (strcmp (form, "clear") == 0)
4798 else if (strcmp (form, "imply") == 0)
4802 as_bad (_("Unrecognized predicate relation type"));
4803 ignore_rest_of_line ();
4807 if (*input_line_pointer == ',')
4808 ++input_line_pointer;
4818 if (TOUPPER (*input_line_pointer) != 'P'
4819 || (regno = atoi (++input_line_pointer)) < 0
4822 as_bad (_("Predicate register expected"));
4823 ignore_rest_of_line ();
4826 while (ISDIGIT (*input_line_pointer))
4827 ++input_line_pointer;
4834 as_warn (_("Duplicate predicate register ignored"));
4837 /* See if it's a range. */
4838 if (*input_line_pointer == '-')
4841 ++input_line_pointer;
4843 if (TOUPPER (*input_line_pointer) != 'P'
4844 || (regno = atoi (++input_line_pointer)) < 0
4847 as_bad (_("Predicate register expected"));
4848 ignore_rest_of_line ();
4851 while (ISDIGIT (*input_line_pointer))
4852 ++input_line_pointer;
4856 as_bad (_("Bad register range"));
4857 ignore_rest_of_line ();
4868 if (*input_line_pointer != ',')
4870 ++input_line_pointer;
4879 clear_qp_mutex (mask);
4880 clear_qp_implies (mask, (valueT) 0);
4883 if (count != 2 || p1 == -1 || p2 == -1)
4884 as_bad (_("Predicate source and target required"));
4885 else if (p1 == 0 || p2 == 0)
4886 as_bad (_("Use of p0 is not valid in this context"));
4888 add_qp_imply (p1, p2);
4893 as_bad (_("At least two PR arguments expected"));
4898 as_bad (_("Use of p0 is not valid in this context"));
4901 add_qp_mutex (mask);
4904 /* note that we don't override any existing relations */
4907 as_bad (_("At least one PR argument expected"));
4912 fprintf (stderr, "Safe across calls: ");
4913 print_prmask (mask);
4914 fprintf (stderr, "\n");
4916 qp_safe_across_calls = mask;
4919 demand_empty_rest_of_line ();
4922 /* .entry label [, label [, ...]]
4923 Hint to DV code that the given labels are to be considered entry points.
4924 Otherwise, only global labels are considered entry points. */
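/* For example (illustrative), ".entry .Lcallback" makes the DV checker
   treat the local label .Lcallback as an entry point, the same way it
   already treats global labels.  */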
4928 int dummy ATTRIBUTE_UNUSED;
4937 name = input_line_pointer;
4938 c = get_symbol_end ();
4939 symbolP = symbol_find_or_make (name);
4941 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
4943 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
4946 *input_line_pointer = c;
4948 c = *input_line_pointer;
4951 input_line_pointer++;
4953 if (*input_line_pointer == '\n')
4959 demand_empty_rest_of_line ();
4962 /* .mem.offset offset, base
4963 "base" is used to distinguish between offsets from a different base. */
4966 dot_mem_offset (dummy)
4967 int dummy ATTRIBUTE_UNUSED;
4969 md.mem_offset.hint = 1;
4970 md.mem_offset.offset = get_absolute_expression ();
4971 if (*input_line_pointer != ',')
4973 as_bad (_("Comma expected"));
4974 ignore_rest_of_line ();
4977 ++input_line_pointer;
4978 md.mem_offset.base = get_absolute_expression ();
4979 demand_empty_rest_of_line ();
4982 /* ia64-specific pseudo-ops: */
4983 const pseudo_typeS md_pseudo_table[] =
4985 { "radix", dot_radix, 0 },
4986 { "lcomm", s_lcomm_bytes, 1 },
4987 { "loc", dot_loc, 0 },
4988 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
4989 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
4990 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
4991 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
4992 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
4993 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
4994 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
4995 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
4996 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
4997 { "proc", dot_proc, 0 },
4998 { "body", dot_body, 0 },
4999 { "prologue", dot_prologue, 0 },
5000 { "endp", dot_endp, 0 },
5002 { "fframe", dot_fframe, 0 },
5003 { "vframe", dot_vframe, 0 },
5004 { "vframesp", dot_vframesp, 0 },
5005 { "vframepsp", dot_vframepsp, 0 },
5006 { "save", dot_save, 0 },
5007 { "restore", dot_restore, 0 },
5008 { "restorereg", dot_restorereg, 0 },
5009 { "restorereg.p", dot_restorereg_p, 0 },
5010 { "handlerdata", dot_handlerdata, 0 },
5011 { "unwentry", dot_unwentry, 0 },
5012 { "altrp", dot_altrp, 0 },
5013 { "savesp", dot_savemem, 0 },
5014 { "savepsp", dot_savemem, 1 },
5015 { "save.g", dot_saveg, 0 },
5016 { "save.f", dot_savef, 0 },
5017 { "save.b", dot_saveb, 0 },
5018 { "save.gf", dot_savegf, 0 },
5019 { "spill", dot_spill, 0 },
5020 { "spillreg", dot_spillreg, 0 },
5021 { "spillsp", dot_spillmem, 0 },
5022 { "spillpsp", dot_spillmem, 1 },
5023 { "spillreg.p", dot_spillreg_p, 0 },
5024 { "spillsp.p", dot_spillmem_p, 0 },
5025 { "spillpsp.p", dot_spillmem_p, 1 },
5026 { "label_state", dot_label_state, 0 },
5027 { "copy_state", dot_copy_state, 0 },
5028 { "unwabi", dot_unwabi, 0 },
5029 { "personality", dot_personality, 0 },
5031 { "estate", dot_estate, 0 },
5033 { "mii", dot_template, 0x0 },
5034 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5035 { "mlx", dot_template, 0x2 },
5036 { "mmi", dot_template, 0x4 },
5037 { "mfi", dot_template, 0x6 },
5038 { "mmf", dot_template, 0x7 },
5039 { "mib", dot_template, 0x8 },
5040 { "mbb", dot_template, 0x9 },
5041 { "bbb", dot_template, 0xb },
5042 { "mmb", dot_template, 0xc },
5043 { "mfb", dot_template, 0xe },
5045 { "lb", dot_scope, 0 },
5046 { "le", dot_scope, 1 },
5048 { "align", dot_align, 0 },
5049 { "regstk", dot_regstk, 0 },
5050 { "rotr", dot_rot, DYNREG_GR },
5051 { "rotf", dot_rot, DYNREG_FR },
5052 { "rotp", dot_rot, DYNREG_PR },
5053 { "lsb", dot_byteorder, 0 },
5054 { "msb", dot_byteorder, 1 },
5055 { "psr", dot_psr, 0 },
5056 { "alias", dot_alias, 0 },
5057 { "secalias", dot_alias, 1 },
5058 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5060 { "xdata1", dot_xdata, 1 },
5061 { "xdata2", dot_xdata, 2 },
5062 { "xdata4", dot_xdata, 4 },
5063 { "xdata8", dot_xdata, 8 },
5064 { "xreal4", dot_xfloat_cons, 'f' },
5065 { "xreal8", dot_xfloat_cons, 'd' },
5066 { "xreal10", dot_xfloat_cons, 'x' },
5067 { "xreal16", dot_xfloat_cons, 'X' },
5068 { "xstring", dot_xstringer, 0 },
5069 { "xstringz", dot_xstringer, 1 },
5071 /* unaligned versions: */
5072 { "xdata2.ua", dot_xdata_ua, 2 },
5073 { "xdata4.ua", dot_xdata_ua, 4 },
5074 { "xdata8.ua", dot_xdata_ua, 8 },
5075 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5076 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5077 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5078 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5080 /* annotations/DV checking support */
5081 { "entry", dot_entry, 0 },
5082 { "mem.offset", dot_mem_offset, 0 },
5083 { "pred.rel", dot_pred_rel, 0 },
5084 { "pred.rel.clear", dot_pred_rel, 'c' },
5085 { "pred.rel.imply", dot_pred_rel, 'i' },
5086 { "pred.rel.mutex", dot_pred_rel, 'm' },
5087 { "pred.safe_across_calls", dot_pred_rel, 's' },
5088 { "reg.val", dot_reg_val, 0 },
5089 { "serialize.data", dot_serialize, 0 },
5090 { "serialize.instruction", dot_serialize, 1 },
5091 { "auto", dot_dv_mode, 'a' },
5092 { "explicit", dot_dv_mode, 'e' },
5093 { "default", dot_dv_mode, 'd' },
5095 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5096 IA-64 aligns data allocation pseudo-ops by default, so we have to
5097 tell it that these ones are supposed to be unaligned. Long term,
5098 should rewrite so that only IA-64 specific data allocation pseudo-ops
5099 are aligned by default. */
5100 {"2byte", stmt_cons_ua, 2},
5101 {"4byte", stmt_cons_ua, 4},
5102 {"8byte", stmt_cons_ua, 8},
5107 static const struct pseudo_opcode
5110 void (*handler) (int);
5115 /* these are more like pseudo-ops, but don't start with a dot */
5116 { "data1", cons, 1 },
5117 { "data2", cons, 2 },
5118 { "data4", cons, 4 },
5119 { "data8", cons, 8 },
5120 { "data16", cons, 16 },
5121 { "real4", stmt_float_cons, 'f' },
5122 { "real8", stmt_float_cons, 'd' },
5123 { "real10", stmt_float_cons, 'x' },
5124 { "real16", stmt_float_cons, 'X' },
5125 { "string", stringer, 0 },
5126 { "stringz", stringer, 1 },
5128 /* unaligned versions: */
5129 { "data2.ua", stmt_cons_ua, 2 },
5130 { "data4.ua", stmt_cons_ua, 4 },
5131 { "data8.ua", stmt_cons_ua, 8 },
5132 { "data16.ua", stmt_cons_ua, 16 },
5133 { "real4.ua", float_cons, 'f' },
5134 { "real8.ua", float_cons, 'd' },
5135 { "real10.ua", float_cons, 'x' },
5136 { "real16.ua", float_cons, 'X' },
5139 /* Declare a register by creating a symbol for it and entering it in
5140 the symbol table. */
5143 declare_register (name, regnum)
5150 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
5152 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
5154 as_fatal ("Inserting \"%s\" into register table failed: %s",
5161 declare_register_set (prefix, num_regs, base_regnum)
5169 for (i = 0; i < num_regs; ++i)
5171 sprintf (name, "%s%u", prefix, i);
5172 declare_register (name, base_regnum + i);
5177 operand_width (opnd)
5178 enum ia64_opnd opnd;
5180 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5181 unsigned int bits = 0;
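  /* Note: an operand may be encoded in several separate instruction
     fields; the loop below simply sums the widths of all of them, so
     e.g. a 22-bit immediate split across fields still reports 22.  */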
5185 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5186 bits += odesc->field[i].bits;
5191 static enum operand_match_result
5192 operand_match (idesc, index, e)
5193 const struct ia64_opcode *idesc;
5197 enum ia64_opnd opnd = idesc->operands[index];
5198 int bits, relocatable = 0;
5199 struct insn_fix *fix;
5206 case IA64_OPND_AR_CCV:
5207 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5208 return OPERAND_MATCH;
5211 case IA64_OPND_AR_CSD:
5212 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5213 return OPERAND_MATCH;
5216 case IA64_OPND_AR_PFS:
5217 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5218 return OPERAND_MATCH;
5222 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5223 return OPERAND_MATCH;
5227 if (e->X_op == O_register && e->X_add_number == REG_IP)
5228 return OPERAND_MATCH;
5232 if (e->X_op == O_register && e->X_add_number == REG_PR)
5233 return OPERAND_MATCH;
5236 case IA64_OPND_PR_ROT:
5237 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5238 return OPERAND_MATCH;
5242 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5243 return OPERAND_MATCH;
5246 case IA64_OPND_PSR_L:
5247 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5248 return OPERAND_MATCH;
5251 case IA64_OPND_PSR_UM:
5252 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5253 return OPERAND_MATCH;
5257 if (e->X_op == O_constant)
5259 if (e->X_add_number == 1)
5260 return OPERAND_MATCH;
5262 return OPERAND_OUT_OF_RANGE;
5267 if (e->X_op == O_constant)
5269 if (e->X_add_number == 8)
5270 return OPERAND_MATCH;
5272 return OPERAND_OUT_OF_RANGE;
5277 if (e->X_op == O_constant)
5279 if (e->X_add_number == 16)
5280 return OPERAND_MATCH;
5282 return OPERAND_OUT_OF_RANGE;
5286 /* register operands: */
5289 if (e->X_op == O_register && e->X_add_number >= REG_AR
5290 && e->X_add_number < REG_AR + 128)
5291 return OPERAND_MATCH;
5296 if (e->X_op == O_register && e->X_add_number >= REG_BR
5297 && e->X_add_number < REG_BR + 8)
5298 return OPERAND_MATCH;
5302 if (e->X_op == O_register && e->X_add_number >= REG_CR
5303 && e->X_add_number < REG_CR + 128)
5304 return OPERAND_MATCH;
5311 if (e->X_op == O_register && e->X_add_number >= REG_FR
5312 && e->X_add_number < REG_FR + 128)
5313 return OPERAND_MATCH;
5318 if (e->X_op == O_register && e->X_add_number >= REG_P
5319 && e->X_add_number < REG_P + 64)
5320 return OPERAND_MATCH;
5326 if (e->X_op == O_register && e->X_add_number >= REG_GR
5327 && e->X_add_number < REG_GR + 128)
5328 return OPERAND_MATCH;
5331 case IA64_OPND_R3_2:
5332 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5334 if (e->X_add_number < REG_GR + 4)
5335 return OPERAND_MATCH;
5336 else if (e->X_add_number < REG_GR + 128)
5337 return OPERAND_OUT_OF_RANGE;
5341 /* indirect operands: */
5342 case IA64_OPND_CPUID_R3:
5343 case IA64_OPND_DBR_R3:
5344 case IA64_OPND_DTR_R3:
5345 case IA64_OPND_ITR_R3:
5346 case IA64_OPND_IBR_R3:
5347 case IA64_OPND_MSR_R3:
5348 case IA64_OPND_PKR_R3:
5349 case IA64_OPND_PMC_R3:
5350 case IA64_OPND_PMD_R3:
5351 case IA64_OPND_RR_R3:
5352 if (e->X_op == O_index && e->X_op_symbol
5353 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5354 == opnd - IA64_OPND_CPUID_R3))
5355 return OPERAND_MATCH;
5359 if (e->X_op == O_index && !e->X_op_symbol)
5360 return OPERAND_MATCH;
5363 /* immediate operands: */
5364 case IA64_OPND_CNT2a:
5365 case IA64_OPND_LEN4:
5366 case IA64_OPND_LEN6:
5367 bits = operand_width (idesc->operands[index]);
5368 if (e->X_op == O_constant)
5370 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5371 return OPERAND_MATCH;
5373 return OPERAND_OUT_OF_RANGE;
5377 case IA64_OPND_CNT2b:
5378 if (e->X_op == O_constant)
5380 if ((bfd_vma) (e->X_add_number - 1) < 3)
5381 return OPERAND_MATCH;
5383 return OPERAND_OUT_OF_RANGE;
5387 case IA64_OPND_CNT2c:
5388 val = e->X_add_number;
5389 if (e->X_op == O_constant)
5391 if ((val == 0 || val == 7 || val == 15 || val == 16))
5392 return OPERAND_MATCH;
5394 return OPERAND_OUT_OF_RANGE;
5399 /* SOR must be an integer multiple of 8 */
5400 if (e->X_op == O_constant && e->X_add_number & 0x7)
5401 return OPERAND_OUT_OF_RANGE;
5404 if (e->X_op == O_constant)
5406 if ((bfd_vma) e->X_add_number <= 96)
5407 return OPERAND_MATCH;
5409 return OPERAND_OUT_OF_RANGE;
5413 case IA64_OPND_IMMU62:
5414 if (e->X_op == O_constant)
5416 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5417 return OPERAND_MATCH;
5419 return OPERAND_OUT_OF_RANGE;
5423 /* FIXME -- need 62-bit relocation type */
5424 as_bad (_("62-bit relocation not yet implemented"));
5428 case IA64_OPND_IMMU64:
5429 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5430 || e->X_op == O_subtract)
5432 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5433 fix->code = BFD_RELOC_IA64_IMM64;
5434 if (e->X_op != O_subtract)
5436 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5437 if (e->X_op == O_pseudo_fixup)
5441 fix->opnd = idesc->operands[index];
5444 ++CURR_SLOT.num_fixups;
5445 return OPERAND_MATCH;
5447 else if (e->X_op == O_constant)
5448 return OPERAND_MATCH;
5451 case IA64_OPND_CCNT5:
5452 case IA64_OPND_CNT5:
5453 case IA64_OPND_CNT6:
5454 case IA64_OPND_CPOS6a:
5455 case IA64_OPND_CPOS6b:
5456 case IA64_OPND_CPOS6c:
5457 case IA64_OPND_IMMU2:
5458 case IA64_OPND_IMMU7a:
5459 case IA64_OPND_IMMU7b:
5460 case IA64_OPND_IMMU21:
5461 case IA64_OPND_IMMU24:
5462 case IA64_OPND_MBTYPE4:
5463 case IA64_OPND_MHTYPE8:
5464 case IA64_OPND_POS6:
5465 bits = operand_width (idesc->operands[index]);
5466 if (e->X_op == O_constant)
5468 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5469 return OPERAND_MATCH;
5471 return OPERAND_OUT_OF_RANGE;
5475 case IA64_OPND_IMMU9:
5476 bits = operand_width (idesc->operands[index]);
5477 if (e->X_op == O_constant)
5479 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5481 int lobits = e->X_add_number & 0x3;
5482 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5483 e->X_add_number |= (bfd_vma) 0x3;
5484 return OPERAND_MATCH;
5487 return OPERAND_OUT_OF_RANGE;
5491 case IA64_OPND_IMM44:
5492 /* The least significant 16 bits must be zero. */
5493 if ((e->X_add_number & 0xffff) != 0)
5494 /* XXX technically, this is wrong: we should not be issuing warning
5495 messages until we're sure this instruction pattern is going to
5497 as_warn (_("lower 16 bits of mask ignored"));
5499 if (e->X_op == O_constant)
5501 if (((e->X_add_number >= 0
5502 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5503 || (e->X_add_number < 0
5504 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5507 if (e->X_add_number >= 0
5508 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5510 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5512 return OPERAND_MATCH;
5515 return OPERAND_OUT_OF_RANGE;
5519 case IA64_OPND_IMM17:
5520 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5521 if (e->X_op == O_constant)
5523 if (((e->X_add_number >= 0
5524 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5525 || (e->X_add_number < 0
5526 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5529 if (e->X_add_number >= 0
5530 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5532 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5534 return OPERAND_MATCH;
5537 return OPERAND_OUT_OF_RANGE;
5541 case IA64_OPND_IMM14:
5542 case IA64_OPND_IMM22:
5544 case IA64_OPND_IMM1:
5545 case IA64_OPND_IMM8:
5546 case IA64_OPND_IMM8U4:
5547 case IA64_OPND_IMM8M1:
5548 case IA64_OPND_IMM8M1U4:
5549 case IA64_OPND_IMM8M1U8:
5550 case IA64_OPND_IMM9a:
5551 case IA64_OPND_IMM9b:
5552 bits = operand_width (idesc->operands[index]);
5553 if (relocatable && (e->X_op == O_symbol
5554 || e->X_op == O_subtract
5555 || e->X_op == O_pseudo_fixup))
5557 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5559 if (idesc->operands[index] == IA64_OPND_IMM14)
5560 fix->code = BFD_RELOC_IA64_IMM14;
5562 fix->code = BFD_RELOC_IA64_IMM22;
5564 if (e->X_op != O_subtract)
5566 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5567 if (e->X_op == O_pseudo_fixup)
5571 fix->opnd = idesc->operands[index];
5574 ++CURR_SLOT.num_fixups;
5575 return OPERAND_MATCH;
5577 else if (e->X_op != O_constant
5578 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5579 return OPERAND_MISMATCH;
5581 if (opnd == IA64_OPND_IMM8M1U4)
5583 /* Zero is not valid for unsigned compares that take an adjusted
5584 constant immediate range. */
5585 if (e->X_add_number == 0)
5586 return OPERAND_OUT_OF_RANGE;
5588 /* Sign-extend 32-bit unsigned numbers, so that the following range
5589 checks will work. */
5590 val = e->X_add_number;
5591 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5592 && ((val & ((bfd_vma) 1 << 31)) != 0))
5593 val = ((val << 32) >> 32);
5595 /* Check for 0x100000000. This is valid because
5596 0x100000000-1 is the same as ((uint32_t) -1). */
5597 if (val == ((bfd_signed_vma) 1 << 32))
5598 return OPERAND_MATCH;
5602 else if (opnd == IA64_OPND_IMM8M1U8)
5604 /* Zero is not valid for unsigned compares that take an adjusted
5605 constant immediate range. */
5606 if (e->X_add_number == 0)
5607 return OPERAND_OUT_OF_RANGE;
5609 /* Check for 0x10000000000000000. */
5610 if (e->X_op == O_big)
5612 if (generic_bignum[0] == 0
5613 && generic_bignum[1] == 0
5614 && generic_bignum[2] == 0
5615 && generic_bignum[3] == 0
5616 && generic_bignum[4] == 1)
5617 return OPERAND_MATCH;
5619 return OPERAND_OUT_OF_RANGE;
5622 val = e->X_add_number - 1;
5624 else if (opnd == IA64_OPND_IMM8M1)
5625 val = e->X_add_number - 1;
5626 else if (opnd == IA64_OPND_IMM8U4)
5628 /* Sign-extend 32-bit unsigned numbers, so that the following range
5629 checks will work. */
5630 val = e->X_add_number;
5631 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5632 && ((val & ((bfd_vma) 1 << 31)) != 0))
5633 val = ((val << 32) >> 32);
5636 val = e->X_add_number;
5638 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5639 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5640 return OPERAND_MATCH;
5642 return OPERAND_OUT_OF_RANGE;
5644 case IA64_OPND_INC3:
5645 /* +/- 1, 4, 8, 16 */
5646 val = e->X_add_number;
5649 if (e->X_op == O_constant)
5651 if ((val == 1 || val == 4 || val == 8 || val == 16))
5652 return OPERAND_MATCH;
5654 return OPERAND_OUT_OF_RANGE;
5658 case IA64_OPND_TGT25:
5659 case IA64_OPND_TGT25b:
5660 case IA64_OPND_TGT25c:
5661 case IA64_OPND_TGT64:
5662 if (e->X_op == O_symbol)
5664 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5665 if (opnd == IA64_OPND_TGT25)
5666 fix->code = BFD_RELOC_IA64_PCREL21F;
5667 else if (opnd == IA64_OPND_TGT25b)
5668 fix->code = BFD_RELOC_IA64_PCREL21M;
5669 else if (opnd == IA64_OPND_TGT25c)
5670 fix->code = BFD_RELOC_IA64_PCREL21B;
5671 else if (opnd == IA64_OPND_TGT64)
5672 fix->code = BFD_RELOC_IA64_PCREL60B;
5676 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5677 fix->opnd = idesc->operands[index];
5680 ++CURR_SLOT.num_fixups;
5681 return OPERAND_MATCH;
5683 case IA64_OPND_TAG13:
5684 case IA64_OPND_TAG13b:
5688 return OPERAND_MATCH;
5691 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5692 /* There are no external relocs for TAG13/TAG13b fields, so we
5693 create a dummy reloc. This will not live past md_apply_fix3. */
5694 fix->code = BFD_RELOC_UNUSED;
5695 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5696 fix->opnd = idesc->operands[index];
5699 ++CURR_SLOT.num_fixups;
5700 return OPERAND_MATCH;
5707 case IA64_OPND_LDXMOV:
5708 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5709 fix->code = BFD_RELOC_IA64_LDXMOV;
5710 fix->opnd = idesc->operands[index];
5713 ++CURR_SLOT.num_fixups;
5714 return OPERAND_MATCH;
5719 return OPERAND_MISMATCH;
5728 memset (e, 0, sizeof (*e));
5731 if (*input_line_pointer != '}')
5733 sep = *input_line_pointer++;
5737 if (!md.manual_bundling)
5738 as_warn ("Found '}' when manual bundling is off");
5740 CURR_SLOT.manual_bundling_off = 1;
5741 md.manual_bundling = 0;
5747 /* Returns the next entry in the opcode table that matches the one in
5748 IDESC, and frees the entry in IDESC. If no matching entry is
5749 found, NULL is returned instead. */
5751 static struct ia64_opcode *
5752 get_next_opcode (struct ia64_opcode *idesc)
5754 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5755 ia64_free_opcode (idesc);
5759 /* Parse the operands for the opcode and find the opcode variant that
5760 matches the specified operands, or NULL if no match is possible. */
5762 static struct ia64_opcode *
5763 parse_operands (idesc)
5764 struct ia64_opcode *idesc;
5766 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5767 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
5768 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5769 enum operand_match_result result;
5771 char *first_arg = 0, *end, *saved_input_pointer;
5774 assert (strlen (idesc->name) <= 128);
5776 strcpy (mnemonic, idesc->name);
5777 if (idesc->operands[2] == IA64_OPND_SOF)
5779 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5780 can't parse the first operand until we have parsed the
5781 remaining operands of the "alloc" instruction. */
5783 first_arg = input_line_pointer;
5784 end = strchr (input_line_pointer, '=');
5787 as_bad ("Expected separator `='");
5790 input_line_pointer = end + 1;
5795 for (; i < NELEMS (CURR_SLOT.opnd); ++i)
5797 sep = parse_operand (CURR_SLOT.opnd + i);
5798 if (CURR_SLOT.opnd[i].X_op == O_absent)
5803 if (sep != '=' && sep != ',')
5808 if (num_outputs > 0)
5809 as_bad ("Duplicate equal sign (=) in instruction");
5811 num_outputs = i + 1;
5816 as_bad ("Illegal operand separator `%c'", sep);
5820 if (idesc->operands[2] == IA64_OPND_SOF)
5822 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
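	 /* E.g., "alloc r32=ar.pfs,2,3,4,0" (2 in, 3 local, 4 out, 0 rotating)
	    is encoded as if written "alloc r32=ar.pfs,9,5,0":
	    sof = i+l+o = 9 and sol = i+l = 5.  */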
5823 know (strcmp (idesc->name, "alloc") == 0);
5824 if (num_operands == 5 /* first_arg not included in this count! */
5825 && CURR_SLOT.opnd[2].X_op == O_constant
5826 && CURR_SLOT.opnd[3].X_op == O_constant
5827 && CURR_SLOT.opnd[4].X_op == O_constant
5828 && CURR_SLOT.opnd[5].X_op == O_constant)
5830 sof = set_regstack (CURR_SLOT.opnd[2].X_add_number,
5831 CURR_SLOT.opnd[3].X_add_number,
5832 CURR_SLOT.opnd[4].X_add_number,
5833 CURR_SLOT.opnd[5].X_add_number);
5835 /* now we can parse the first arg: */
5836 saved_input_pointer = input_line_pointer;
5837 input_line_pointer = first_arg;
5838 sep = parse_operand (CURR_SLOT.opnd + 0);
5840 --num_outputs; /* force error */
5841 input_line_pointer = saved_input_pointer;
5843 CURR_SLOT.opnd[2].X_add_number = sof;
5844 CURR_SLOT.opnd[3].X_add_number
5845 = sof - CURR_SLOT.opnd[4].X_add_number;
5846 CURR_SLOT.opnd[4] = CURR_SLOT.opnd[5];
5850 highest_unmatched_operand = 0;
5851 curr_out_of_range_pos = -1;
5853 expected_operand = idesc->operands[0];
5854 for (; idesc; idesc = get_next_opcode (idesc))
5856 if (num_outputs != idesc->num_outputs)
5857 continue; /* mismatch in # of outputs */
5859 CURR_SLOT.num_fixups = 0;
5861 /* Try to match all operands. If we see an out-of-range operand,
5862 then continue trying to match the rest of the operands, since if
5863 the rest match, then this idesc will give the best error message. */
5865 out_of_range_pos = -1;
5866 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5868 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
5869 if (result != OPERAND_MATCH)
5871 if (result != OPERAND_OUT_OF_RANGE)
5873 if (out_of_range_pos < 0)
5874 /* remember position of the first out-of-range operand: */
5875 out_of_range_pos = i;
5879 /* If we did not match all operands, or if at least one operand was
5880 out-of-range, then this idesc does not match. Keep track of which
5881 idesc matched the most operands before failing. If we have two
5882 idescs that failed at the same position, and one had an out-of-range
5883 operand, then prefer the out-of-range operand. Thus if we have
5884 "add r0=0x1000000,r1" we get an error saying the constant is out
5885 of range instead of an error saying that the constant should have been
5888 if (i != num_operands || out_of_range_pos >= 0)
5890 if (i > highest_unmatched_operand
5891 || (i == highest_unmatched_operand
5892 && out_of_range_pos > curr_out_of_range_pos))
5894 highest_unmatched_operand = i;
5895 if (out_of_range_pos >= 0)
5897 expected_operand = idesc->operands[out_of_range_pos];
5898 error_pos = out_of_range_pos;
5902 expected_operand = idesc->operands[i];
5905 curr_out_of_range_pos = out_of_range_pos;
5910 if (num_operands < NELEMS (idesc->operands)
5911 && idesc->operands[num_operands])
5912 continue; /* mismatch in number of arguments */
5918 if (expected_operand)
5919 as_bad ("Operand %u of `%s' should be %s",
5920 error_pos + 1, mnemonic,
5921 elf64_ia64_operands[expected_operand].desc);
5923 as_bad ("Operand mismatch");
5929 /* Keep track of state necessary to determine whether a NOP is necessary
5930 to avoid an erratum in A and B step Itanium chips, and return 1 if we
5931 detect a case where additional NOPs may be necessary. */
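   /* The sequence guarded against below spans three consecutive instruction
      groups: (1) an F-unit insn writes a predicate; (2) in the next group, an
      M-unit insn qualified by that predicate writes a GR (other than via
      add/sub/shladd or a post-increment form); (3) in the group after that,
      the GR is used as a memory or indirect-register address.  */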
5933 errata_nop_necessary_p (slot, insn_unit)
5935 enum ia64_unit insn_unit;
5938 struct group *this_group = md.last_groups + md.group_idx;
5939 struct group *prev_group = md.last_groups + (md.group_idx + 2) % 3;
5940 struct ia64_opcode *idesc = slot->idesc;
5942 /* Test whether this could be the first insn in a problematic sequence. */
5943 if (insn_unit == IA64_UNIT_F)
5945 for (i = 0; i < idesc->num_outputs; i++)
5946 if (idesc->operands[i] == IA64_OPND_P1
5947 || idesc->operands[i] == IA64_OPND_P2)
5949 int regno = slot->opnd[i].X_add_number - REG_P;
5950 /* Ignore invalid operands; they generate errors elsewhere. */
5953 this_group->p_reg_set[regno] = 1;
5957 /* Test whether this could be the second insn in a problematic sequence. */
5958 if (insn_unit == IA64_UNIT_M && slot->qp_regno > 0
5959 && prev_group->p_reg_set[slot->qp_regno])
5961 for (i = 0; i < idesc->num_outputs; i++)
5962 if (idesc->operands[i] == IA64_OPND_R1
5963 || idesc->operands[i] == IA64_OPND_R2
5964 || idesc->operands[i] == IA64_OPND_R3)
5966 int regno = slot->opnd[i].X_add_number - REG_GR;
5967 /* Ignore invalid operands; they generate errors elsewhere. */
5970 if (strncmp (idesc->name, "add", 3) != 0
5971 && strncmp (idesc->name, "sub", 3) != 0
5972 && strncmp (idesc->name, "shladd", 6) != 0
5973 && (idesc->flags & IA64_OPCODE_POSTINC) == 0)
5974 this_group->g_reg_set_conditionally[regno] = 1;
5978 /* Test whether this could be the third insn in a problematic sequence. */
5979 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; i++)
5981 if (/* For fc, ptc, ptr, tak, thash, tpa, ttag, probe, ptr, ptc. */
5982 idesc->operands[i] == IA64_OPND_R3
5983 /* For mov indirect. */
5984 || idesc->operands[i] == IA64_OPND_RR_R3
5985 || idesc->operands[i] == IA64_OPND_DBR_R3
5986 || idesc->operands[i] == IA64_OPND_IBR_R3
5987 || idesc->operands[i] == IA64_OPND_PKR_R3
5988 || idesc->operands[i] == IA64_OPND_PMC_R3
5989 || idesc->operands[i] == IA64_OPND_PMD_R3
5990 || idesc->operands[i] == IA64_OPND_MSR_R3
5991 || idesc->operands[i] == IA64_OPND_CPUID_R3
5993 || idesc->operands[i] == IA64_OPND_ITR_R3
5994 || idesc->operands[i] == IA64_OPND_DTR_R3
5995 /* Normal memory addresses (load, store, xchg, cmpxchg, etc.). */
5996 || idesc->operands[i] == IA64_OPND_MR3)
5998 int regno = slot->opnd[i].X_add_number - REG_GR;
5999 /* Ignore invalid operands; they generate errors elsewhere. */
6002 if (idesc->operands[i] == IA64_OPND_R3)
6004 if (strcmp (idesc->name, "fc") != 0
6005 && strcmp (idesc->name, "tak") != 0
6006 && strcmp (idesc->name, "thash") != 0
6007 && strcmp (idesc->name, "tpa") != 0
6008 && strcmp (idesc->name, "ttag") != 0
6009 && strncmp (idesc->name, "ptr", 3) != 0
6010 && strncmp (idesc->name, "ptc", 3) != 0
6011 && strncmp (idesc->name, "probe", 5) != 0)
6014 if (prev_group->g_reg_set_conditionally[regno])
6022 build_insn (slot, insnp)
6026 const struct ia64_operand *odesc, *o2desc;
6027 struct ia64_opcode *idesc = slot->idesc;
6028 bfd_signed_vma insn, val;
6032 insn = idesc->opcode | slot->qp_regno;
6034 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6036 if (slot->opnd[i].X_op == O_register
6037 || slot->opnd[i].X_op == O_constant
6038 || slot->opnd[i].X_op == O_index)
6039 val = slot->opnd[i].X_add_number;
6040 else if (slot->opnd[i].X_op == O_big)
6042 /* This must be the value 0x10000000000000000. */
6043 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6049 switch (idesc->operands[i])
6051 case IA64_OPND_IMMU64:
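	  /* movl's 64-bit immediate: the 41 bits val[62:22] are emitted
	     separately through *insnp (they occupy the extra slot an MLX
	     bundle provides), while the remaining bits are folded into the
	     opcode as imm7b = val[6:0], imm9d = val[15:7],
	     imm5c = val[20:16], ic = val[21], and i = val[63].  */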
6052 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6053 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6054 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6055 | (((val >> 63) & 0x1) << 36));
6058 case IA64_OPND_IMMU62:
6059 val &= 0x3fffffffffffffffULL;
6060 if (val != slot->opnd[i].X_add_number)
6061 as_warn (_("Value truncated to 62 bits"));
6062 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6063 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6066 case IA64_OPND_TGT64:
6068 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6069 insn |= ((((val >> 59) & 0x1) << 36)
6070 | (((val >> 0) & 0xfffff) << 13));
6101 case IA64_OPND_R3_2:
6102 case IA64_OPND_CPUID_R3:
6103 case IA64_OPND_DBR_R3:
6104 case IA64_OPND_DTR_R3:
6105 case IA64_OPND_ITR_R3:
6106 case IA64_OPND_IBR_R3:
6108 case IA64_OPND_MSR_R3:
6109 case IA64_OPND_PKR_R3:
6110 case IA64_OPND_PMC_R3:
6111 case IA64_OPND_PMD_R3:
6112 case IA64_OPND_RR_R3:
6120 odesc = elf64_ia64_operands + idesc->operands[i];
6121 err = (*odesc->insert) (odesc, val, &insn);
6123 as_bad_where (slot->src_file, slot->src_line,
6124 "Bad operand value: %s", err);
6125 if (idesc->flags & IA64_OPCODE_PSEUDO)
6127 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6128 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6130 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6131 (*o2desc->insert) (o2desc, val, &insn);
6133 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6134 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6135 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6137 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6138 (*o2desc->insert) (o2desc, 64 - val, &insn);
6148 unsigned int manual_bundling_on = 0, manual_bundling_off = 0;
6149 unsigned int manual_bundling = 0;
6150 enum ia64_unit required_unit, insn_unit = 0;
6151 enum ia64_insn_type type[3], insn_type;
6152 unsigned int template, orig_template;
6153 bfd_vma insn[3] = { -1, -1, -1 };
6154 struct ia64_opcode *idesc;
6155 int end_of_insn_group = 0, user_template = -1;
6156 int n, i, j, first, curr;
6157 unw_rec_list *ptr, *last_ptr, *end_ptr;
6158 bfd_vma t0 = 0, t1 = 0;
6159 struct label_fix *lfix;
6160 struct insn_fix *ifix;
6166 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6167	  know (first >= 0 && first < NUM_SLOTS);
6168 n = MIN (3, md.num_slots_in_use);
6170	  /* Determine template: use user_template if specified, best match
6173 if (md.slot[first].user_template >= 0)
6174 user_template = template = md.slot[first].user_template;
6177 /* Auto select appropriate template. */
6178 memset (type, 0, sizeof (type));
6180 for (i = 0; i < n; ++i)
6182 if (md.slot[curr].label_fixups && i != 0)
6184 type[i] = md.slot[curr].idesc->type;
6185 curr = (curr + 1) % NUM_SLOTS;
6187 template = best_template[type[0]][type[1]][type[2]];
6190 /* initialize instructions with appropriate nops: */
6191 for (i = 0; i < 3; ++i)
6192 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
6196	  /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6197 from the start of the frag. */
6198 addr_mod = frag_now_fix () & 15;
6199 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6200 as_bad (_("instruction address is not a multiple of 16"));
6201 frag_now->insn_addr = addr_mod;
6202 frag_now->has_code = 1;
6204 /* now fill in slots with as many insns as possible: */
6206 idesc = md.slot[curr].idesc;
6207 end_of_insn_group = 0;
6208 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6210 /* If we have unwind records, we may need to update some now. */
6211 ptr = md.slot[curr].unwind_record;
6214 /* Find the last prologue/body record in the list for the current
6215 insn, and set the slot number for all records up to that point.
6216 This needs to be done now, because prologue/body records refer to
6217 the current point, not the point after the instruction has been
6218 issued. This matters because there may have been nops emitted
6219 meanwhile. Any non-prologue non-body record followed by a
6220 prologue/body record must also refer to the current point. */
6222 end_ptr = md.slot[(curr + 1) % NUM_SLOTS].unwind_record;
6223 for (; ptr != end_ptr; ptr = ptr->next)
6224 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6225 || ptr->r.type == body)
6229 /* Make last_ptr point one after the last prologue/body
6231 last_ptr = last_ptr->next;
6232 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6235 ptr->slot_number = (unsigned long) f + i;
6236 ptr->slot_frag = frag_now;
6238 /* Remove the initialized records, so that we won't accidentally
6239 update them again if we insert a nop and continue. */
6240 md.slot[curr].unwind_record = last_ptr;
6244 if (idesc->flags & IA64_OPCODE_SLOT2)
6246 if (manual_bundling && i != 2)
6247 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6248 "`%s' must be last in bundle", idesc->name);
6252 if (idesc->flags & IA64_OPCODE_LAST)
6255 unsigned int required_template;
6257 /* If we need a stop bit after an M slot, our only choice is
6258 template 5 (M;;MI). If we need a stop bit after a B
6259 slot, our only choice is to place it at the end of the
6260 bundle, because the only available templates are MIB,
6261 MBB, BBB, MMB, and MFB. We don't handle anything other
6262 than M and B slots because these are the only kind of
6263 instructions that can have the IA64_OPCODE_LAST bit set. */
6264 required_template = template;
6265 switch (idesc->type)
6269 required_template = 5;
6277 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6278 "Internal error: don't know how to force %s to end"
6279 "of instruction group", idesc->name);
6283 if (manual_bundling && i != required_slot)
6284 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6285 "`%s' must be last in instruction group",
6287 if (required_slot < i)
6288 /* Can't fit this instruction. */
6292 if (required_template != template)
6294 /* If we switch the template, we need to reset the NOPs
6295 after slot i. The slot-types of the instructions ahead
6296 of i never change, so we don't need to worry about
6297 changing NOPs in front of this slot. */
6298 for (j = i; j < 3; ++j)
6299 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6301 template = required_template;
6303 if (curr != first && md.slot[curr].label_fixups)
6305 if (manual_bundling_on)
6306 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6307 "Label must be first in a bundle");
6308 /* This insn must go into the first slot of a bundle. */
6312 manual_bundling_on = md.slot[curr].manual_bundling_on;
6313 manual_bundling_off = md.slot[curr].manual_bundling_off;
6315 if (manual_bundling_on)
6318 manual_bundling = 1;
6320 break; /* need to start a new bundle */
6323 if (end_of_insn_group && md.num_slots_in_use >= 1)
6325 /* We need an instruction group boundary in the middle of a
6326	     bundle.  See if we can switch to another template with
6327 an appropriate boundary. */
6329 orig_template = template;
6330 if (i == 1 && (user_template == 4
6331 || (user_template < 0
6332 && (ia64_templ_desc[template].exec_unit[0]
6336 end_of_insn_group = 0;
6338 else if (i == 2 && (user_template == 0
6339 || (user_template < 0
6340 && (ia64_templ_desc[template].exec_unit[1]
6342 /* This test makes sure we don't switch the template if
6343 the next instruction is one that needs to be first in
6344 an instruction group. Since all those instructions are
6345 in the M group, there is no way such an instruction can
6346 fit in this bundle even if we switch the template. The
6347 reason we have to check for this is that otherwise we
6348 may end up generating "MI;;I M.." which has the deadly
6349 effect that the second M instruction is no longer the
6350 first in the bundle! --davidm 99/12/16 */
6351 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6354 end_of_insn_group = 0;
6356 else if (curr != first)
6357 /* can't fit this insn */
6360 if (template != orig_template)
6361 /* if we switch the template, we need to reset the NOPs
6362 after slot i. The slot-types of the instructions ahead
6363 of i never change, so we don't need to worry about
6364 changing NOPs in front of this slot. */
6365 for (j = i; j < 3; ++j)
6366 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
6368 required_unit = ia64_templ_desc[template].exec_unit[i];
6370 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6371 if (idesc->type == IA64_TYPE_DYN)
6373 enum ia64_opnd opnd1, opnd2;
6375 if ((strcmp (idesc->name, "nop") == 0)
6376 || (strcmp (idesc->name, "hint") == 0)
6377 || (strcmp (idesc->name, "break") == 0))
6378 insn_unit = required_unit;
6379 else if (strcmp (idesc->name, "chk.s") == 0
6380 || strcmp (idesc->name, "mov") == 0)
6382 insn_unit = IA64_UNIT_M;
6383 if (required_unit == IA64_UNIT_I
6384 || (required_unit == IA64_UNIT_F && template == 6))
6385 insn_unit = IA64_UNIT_I;
6388 as_fatal ("emit_one_bundle: unexpected dynamic op");
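	      /* Append the unit letter so the dynamic mnemonic resolves to a
		 real opcode; e.g., a "nop" destined for an I slot is looked
		 up as "nop.i".  */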
6390 sprintf (mnemonic, "%s.%c", idesc->name, "?imbfxx"[insn_unit]);
6391 opnd1 = idesc->operands[0];
6392 opnd2 = idesc->operands[1];
6393 ia64_free_opcode (idesc);
6394 idesc = ia64_find_opcode (mnemonic);
6395 /* moves to/from ARs have collisions */
6396 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6398 while (idesc != NULL
6399 && (idesc->operands[0] != opnd1
6400 || idesc->operands[1] != opnd2))
6401 idesc = get_next_opcode (idesc);
6405 /* no other resolved dynamic ops have collisions */
6406 know (!get_next_opcode (idesc));
6408 md.slot[curr].idesc = idesc;
6412 insn_type = idesc->type;
6413 insn_unit = IA64_UNIT_NIL;
6417 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6418 insn_unit = required_unit;
6420 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6421 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6422 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6423 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6424 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6429 if (insn_unit != required_unit)
6431 if (required_unit == IA64_UNIT_L
6432 && insn_unit == IA64_UNIT_I
6433 && !(idesc->flags & IA64_OPCODE_X_IN_MLX))
6435 /* we got ourselves an MLX template but the current
6436 instruction isn't an X-unit, or an I-unit instruction
6437 that can go into the X slot of an MLX template. Duh. */
6438 if (md.num_slots_in_use >= NUM_SLOTS)
6440 as_bad_where (md.slot[curr].src_file,
6441 md.slot[curr].src_line,
6442 "`%s' can't go in X slot of "
6443 "MLX template", idesc->name);
6444 /* drop this insn so we don't livelock: */
6445 --md.num_slots_in_use;
6449 continue; /* try next slot */
6452 if (debug_type == DEBUG_DWARF2 || md.slot[curr].loc_directive_seen)
6454 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
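	  /* frag_now_fix () already counts the 16 bytes reserved for this
	     bundle, so back up to the bundle start and add the slot index to
	     get this insn's address for the line-number table.  */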
6456 md.slot[curr].loc_directive_seen = 0;
6457 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6460 if (errata_nop_necessary_p (md.slot + curr, insn_unit))
6461 as_warn (_("Additional NOP may be necessary to workaround Itanium processor A/B step errata"));
6463 build_insn (md.slot + curr, insn + i);
6465 ptr = md.slot[curr].unwind_record;
6468 /* Set slot numbers for all remaining unwind records belonging to the
6469 current insn. There can not be any prologue/body unwind records
6471 end_ptr = md.slot[(curr + 1) % NUM_SLOTS].unwind_record;
6472 for (; ptr != end_ptr; ptr = ptr->next)
6474 ptr->slot_number = (unsigned long) f + i;
6475 ptr->slot_frag = frag_now;
6477 md.slot[curr].unwind_record = NULL;
6480 if (required_unit == IA64_UNIT_L)
6483 /* skip one slot for long/X-unit instructions */
6486 --md.num_slots_in_use;
6488 /* now is a good time to fix up the labels for this insn: */
6489 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6491 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6492 symbol_set_frag (lfix->sym, frag_now);
6494 /* and fix up the tags also. */
6495 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6497 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6498 symbol_set_frag (lfix->sym, frag_now);
6501 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6503 ifix = md.slot[curr].fixup + j;
6504 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6505 &ifix->expr, ifix->is_pcrel, ifix->code);
6506 fix->tc_fix_data.opnd = ifix->opnd;
6507 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
6508 fix->fx_file = md.slot[curr].src_file;
6509 fix->fx_line = md.slot[curr].src_line;
6512 end_of_insn_group = md.slot[curr].end_of_insn_group;
6514 if (end_of_insn_group)
6516 md.group_idx = (md.group_idx + 1) % 3;
6517 memset (md.last_groups + md.group_idx, 0, sizeof md.last_groups[0]);
6521 ia64_free_opcode (md.slot[curr].idesc);
6522 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6523 md.slot[curr].user_template = -1;
6525 if (manual_bundling_off)
6527 manual_bundling = 0;
6530 curr = (curr + 1) % NUM_SLOTS;
6531 idesc = md.slot[curr].idesc;
6533 if (manual_bundling)
6535 if (md.num_slots_in_use > 0)
6537 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6538 "`%s' does not fit into %s template",
6539 idesc->name, ia64_templ_desc[template].name);
6540 --md.num_slots_in_use;
6543 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6544 "Missing '}' at end of file");
6546 know (md.num_slots_in_use < NUM_SLOTS);
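  /* Pack the 128-bit bundle into two 64-bit words: t0 holds the 5-bit
     template field (stop bit in bit 0), all 41 bits of slot 0, and the low
     18 bits of slot 1; t1 holds the remaining 23 bits of slot 1 and all 41
     bits of slot 2.  */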
6548 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6549 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6551 number_to_chars_littleendian (f + 0, t0, 8);
6552 number_to_chars_littleendian (f + 8, t1, 8);
6556 unwind.list->next_slot_number = (unsigned long) f + 16;
6557 unwind.list->next_slot_frag = frag_now;
6562 md_parse_option (c, arg)
6569 /* Switches from the Intel assembler. */
6571 if (strcmp (arg, "ilp64") == 0
6572 || strcmp (arg, "lp64") == 0
6573 || strcmp (arg, "p64") == 0)
6575 md.flags |= EF_IA_64_ABI64;
6577 else if (strcmp (arg, "ilp32") == 0)
6579 md.flags &= ~EF_IA_64_ABI64;
6581 else if (strcmp (arg, "le") == 0)
6583 md.flags &= ~EF_IA_64_BE;
6584 default_big_endian = 0;
6586 else if (strcmp (arg, "be") == 0)
6588 md.flags |= EF_IA_64_BE;
6589 default_big_endian = 1;
6596 if (strcmp (arg, "so") == 0)
6598 /* Suppress signon message. */
6600 else if (strcmp (arg, "pi") == 0)
6602 /* Reject privileged instructions. FIXME */
6604 else if (strcmp (arg, "us") == 0)
6606 /* Allow union of signed and unsigned range. FIXME */
6608 else if (strcmp (arg, "close_fcalls") == 0)
6610 /* Do not resolve global function calls. */
6617 /* temp[="prefix"] Insert temporary labels into the object file
6618 symbol table prefixed by "prefix".
6619 Default prefix is ":temp:".
6624 /* indirect=<tgt> Assume unannotated indirect branches behavior
6625 according to <tgt> --
6626 exit: branch out from the current context (default)
6627 labels: all labels in context may be branch targets
6629 if (strncmp (arg, "indirect=", 9) != 0)
6634 /* -X conflicts with an ignored option, use -x instead */
6636 if (!arg || strcmp (arg, "explicit") == 0)
6638 /* set default mode to explicit */
6639 md.default_explicit_mode = 1;
6642 else if (strcmp (arg, "auto") == 0)
6644 md.default_explicit_mode = 0;
6646 else if (strcmp (arg, "debug") == 0)
6650 else if (strcmp (arg, "debugx") == 0)
6652 md.default_explicit_mode = 1;
6657 as_bad (_("Unrecognized option '-x%s'"), arg);
6662 /* nops Print nops statistics. */
6665 /* GNU specific switches for gcc. */
6666 case OPTION_MCONSTANT_GP:
6667 md.flags |= EF_IA_64_CONS_GP;
6670 case OPTION_MAUTO_PIC:
6671 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
6682 md_show_usage (stream)
6687 --mconstant-gp mark output file as using the constant-GP model\n\
6688 (sets ELF header flag EF_IA_64_CONS_GP)\n\
6689 --mauto-pic mark output file as using the constant-GP model\n\
6690 without function descriptors (sets ELF header flag\n\
6691 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
6692 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
6693 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
6694 -x | -xexplicit turn on dependency violation checking (default)\n\
6695 -xauto automagically remove dependency violations\n\
6696 -xdebug debug dependency violation checker\n"),
6701 ia64_after_parse_args ()
6703 if (debug_type == DEBUG_STABS)
6704 as_fatal (_("--gstabs is not supported for ia64"));
6707 /* Return true if TYPE fits in TEMPL at SLOT. */
6710 match (int templ, int type, int slot)
6712 enum ia64_unit unit;
6715 unit = ia64_templ_desc[templ].exec_unit[slot];
6718 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
6720 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
6722 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
6723 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
6724 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
6725 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
6726 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
6727 default: result = 0; break;
6732 /* Add a bit of extra goodness if a nop of type F or B would fit
6733 in TEMPL at SLOT. */
6736 extra_goodness (int templ, int slot)
6738 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
6740 if (slot == 2 && match (templ, IA64_TYPE_B, slot))
6745 /* This function is called once, at assembler startup time. It sets
6746 up all the tables, etc. that the MD part of the assembler will need
6747 that can be determined before arguments are parsed. */
6751 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
6756 md.explicit_mode = md.default_explicit_mode;
6758 bfd_set_section_alignment (stdoutput, text_section, 4);
6760 /* Make sure function pointers get initialized. */
6761 target_big_endian = -1;
6762 dot_byteorder (default_big_endian);
6764 alias_hash = hash_new ();
6765 alias_name_hash = hash_new ();
6766 secalias_hash = hash_new ();
6767 secalias_name_hash = hash_new ();
6769 pseudo_func[FUNC_DTP_MODULE].u.sym =
6770 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
6771 &zero_address_frag);
6773 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
6774 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
6775 &zero_address_frag);
6777 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
6778 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
6779 &zero_address_frag);
6781 pseudo_func[FUNC_GP_RELATIVE].u.sym =
6782 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
6783 &zero_address_frag);
6785 pseudo_func[FUNC_LT_RELATIVE].u.sym =
6786 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
6787 &zero_address_frag);
6789 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
6790 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
6791 &zero_address_frag);
6793 pseudo_func[FUNC_PC_RELATIVE].u.sym =
6794 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
6795 &zero_address_frag);
6797 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
6798 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
6799 &zero_address_frag);
6801 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
6802 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
6803 &zero_address_frag);
6805 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
6806 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
6807 &zero_address_frag);
6809 pseudo_func[FUNC_TP_RELATIVE].u.sym =
6810 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
6811 &zero_address_frag);
6813 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
6814 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
6815 &zero_address_frag);
6817 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
6818 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
6819 &zero_address_frag);
6821 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
6822 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
6823 &zero_address_frag);
6825 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
6826 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
6827 &zero_address_frag);
6829 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
6830 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
6831 &zero_address_frag);
6833 pseudo_func[FUNC_IPLT_RELOC].u.sym =
6834 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
6835 &zero_address_frag);
6837 /* Compute the table of best templates. We compute goodness as a
6838 base 4 value, in which each match counts for 3, each F counts
6839 for 2, each B counts for 1. This should maximize the number of
6840 F and B nops in the chosen bundles, which is good because these
6841 pipelines are least likely to be overcommitted. */
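  /* For example, a bundle holding just one M-type insn scores
     3 + 2 + 1 = 6 with MFB (F and B nops fill slots 1 and 2) but only 3
     with MII, so MFB is preferred.  */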
6842 for (i = 0; i < IA64_NUM_TYPES; ++i)
6843 for (j = 0; j < IA64_NUM_TYPES; ++j)
6844 for (k = 0; k < IA64_NUM_TYPES; ++k)
6847 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
6850 if (match (t, i, 0))
6852 if (match (t, j, 1))
6854 if (match (t, k, 2))
6855 goodness = 3 + 3 + 3;
6857 goodness = 3 + 3 + extra_goodness (t, 2);
6859 else if (match (t, j, 2))
6860 goodness = 3 + 3 + extra_goodness (t, 1);
6864 goodness += extra_goodness (t, 1);
6865 goodness += extra_goodness (t, 2);
6868 else if (match (t, i, 1))
6870 if (match (t, j, 2))
6873 goodness = 3 + extra_goodness (t, 2);
6875 else if (match (t, i, 2))
6876 goodness = 3 + extra_goodness (t, 1);
6878 if (goodness > best)
6881 best_template[i][j][k] = t;
6886 for (i = 0; i < NUM_SLOTS; ++i)
6887 md.slot[i].user_template = -1;
6889 md.pseudo_hash = hash_new ();
6890 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
6892 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
6893 (void *) (pseudo_opcode + i));
6895 as_fatal ("ia64.md_begin: can't hash `%s': %s",
6896 pseudo_opcode[i].name, err);
6899 md.reg_hash = hash_new ();
6900 md.dynreg_hash = hash_new ();
6901 md.const_hash = hash_new ();
6902 md.entry_hash = hash_new ();
6904 /* general registers: */
6907 for (i = 0; i < total; ++i)
6909 sprintf (name, "r%d", i - REG_GR);
6910 md.regsym[i] = declare_register (name, i);
6913 /* floating point registers: */
6915 for (; i < total; ++i)
6917 sprintf (name, "f%d", i - REG_FR);
6918 md.regsym[i] = declare_register (name, i);
6921 /* application registers: */
6924 for (; i < total; ++i)
6926 sprintf (name, "ar%d", i - REG_AR);
6927 md.regsym[i] = declare_register (name, i);
6930 /* control registers: */
6933 for (; i < total; ++i)
6935 sprintf (name, "cr%d", i - REG_CR);
6936 md.regsym[i] = declare_register (name, i);
6939 /* predicate registers: */
6941 for (; i < total; ++i)
6943 sprintf (name, "p%d", i - REG_P);
6944 md.regsym[i] = declare_register (name, i);
6947 /* branch registers: */
6949 for (; i < total; ++i)
6951 sprintf (name, "b%d", i - REG_BR);
6952 md.regsym[i] = declare_register (name, i);
6955 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
6956 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
6957 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
6958 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
6959 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
6960 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
6961 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
6963 for (i = 0; i < NELEMS (indirect_reg); ++i)
6965 regnum = indirect_reg[i].regnum;
6966 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
6969 /* define synonyms for application registers: */
6970 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
6971 md.regsym[i] = declare_register (ar[i - REG_AR].name,
6972 REG_AR + ar[i - REG_AR].regnum);
6974 /* define synonyms for control registers: */
6975 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
6976 md.regsym[i] = declare_register (cr[i - REG_CR].name,
6977 REG_CR + cr[i - REG_CR].regnum);
6979 declare_register ("gp", REG_GR + 1);
6980 declare_register ("sp", REG_GR + 12);
6981 declare_register ("rp", REG_BR + 0);
6983 /* pseudo-registers used to specify unwind info: */
6984 declare_register ("psp", REG_PSP);
6986 declare_register_set ("ret", 4, REG_GR + 8);
6987 declare_register_set ("farg", 8, REG_FR + 8);
6988 declare_register_set ("fret", 8, REG_FR + 8);
6990 for (i = 0; i < NELEMS (const_bits); ++i)
6992 err = hash_insert (md.const_hash, const_bits[i].name,
6993 (PTR) (const_bits + i));
6995 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
6999 /* Set the architecture and machine depending on defaults and command line
7001 if (md.flags & EF_IA_64_ABI64)
7002 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7004 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7007 as_warn (_("Could not set architecture and machine"));
7009 /* Set the pointer size and pointer shift size depending on md.flags */
7011 if (md.flags & EF_IA_64_ABI64)
7013 md.pointer_size = 8; /* pointers are 8 bytes */
7014      md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7018 md.pointer_size = 4; /* pointers are 4 bytes */
7019 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7022 md.mem_offset.hint = 0;
7025 md.entry_labels = NULL;
7028 /* Set the elf type to 64 bit ABI by default. Cannot do this in md_begin
7029 because that is called after md_parse_option which is where we do the
7030 dynamic changing of md.flags based on -mlp64 or -milp32. Also, set the
7031 default endianness. */
7034 ia64_init (argc, argv)
7035 int argc ATTRIBUTE_UNUSED;
7036 char **argv ATTRIBUTE_UNUSED;
7038 md.flags = MD_FLAGS_DEFAULT;
7041 /* Return a string for the target object file format. */
7044 ia64_target_format ()
7046 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7048 if (md.flags & EF_IA_64_BE)
7050 if (md.flags & EF_IA_64_ABI64)
7051 #if defined(TE_AIX50)
7052 return "elf64-ia64-aix-big";
7053 #elif defined(TE_HPUX)
7054 return "elf64-ia64-hpux-big";
7056 return "elf64-ia64-big";
7059 #if defined(TE_AIX50)
7060 return "elf32-ia64-aix-big";
7061 #elif defined(TE_HPUX)
7062 return "elf32-ia64-hpux-big";
7064 return "elf32-ia64-big";
7069 if (md.flags & EF_IA_64_ABI64)
7071 return "elf64-ia64-aix-little";
7073 return "elf64-ia64-little";
7077 return "elf32-ia64-aix-little";
7079 return "elf32-ia64-little";
7084 return "unknown-format";
7088 ia64_end_of_source ()
7090 /* terminate insn group upon reaching end of file: */
7091 insn_group_break (1, 0, 0);
7093 /* emits slots we haven't written yet: */
7094 ia64_flush_insns ();
7096 bfd_set_private_flags (stdoutput, md.flags);
7098 md.mem_offset.hint = 0;
7104 if (md.qp.X_op == O_register)
7105 as_bad ("qualifying predicate not followed by instruction");
7106 md.qp.X_op = O_absent;
7108 if (ignore_input ())
7111 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7113 if (md.detect_dv && !md.explicit_mode)
7114 as_warn (_("Explicit stops are ignored in auto mode"));
7116 insn_group_break (1, 0, 0);
7120 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7122 static int defining_tag = 0;
7125 ia64_unrecognized_line (ch)
7131 expression (&md.qp);
7132 if (*input_line_pointer++ != ')')
7134 as_bad ("Expected ')'");
7137 if (md.qp.X_op != O_register)
7139 as_bad ("Qualifying predicate expected");
7142 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7144 as_bad ("Predicate register expected");
7150 if (md.manual_bundling)
7151 as_warn ("Found '{' when manual bundling is already turned on");
7153 CURR_SLOT.manual_bundling_on = 1;
7154 md.manual_bundling = 1;
7156 /* Bundling is only acceptable in explicit mode
7157 or when in default automatic mode. */
7158 if (md.detect_dv && !md.explicit_mode)
7160 if (!md.mode_explicitly_set
7161 && !md.default_explicit_mode)
7164 as_warn (_("Found '{' after explicit switch to automatic mode"));
7169 if (!md.manual_bundling)
7170 as_warn ("Found '}' when manual bundling is off");
7172 PREV_SLOT.manual_bundling_off = 1;
7173 md.manual_bundling = 0;
7175 /* switch back to automatic mode, if applicable */
7178 && !md.mode_explicitly_set
7179 && !md.default_explicit_mode)
7182 /* Allow '{' to follow on the same line. We also allow ";;", but that
7183 happens automatically because ';' is an end of line marker. */
7185 if (input_line_pointer[0] == '{')
7187 input_line_pointer++;
7188 return ia64_unrecognized_line ('{');
7191 demand_empty_rest_of_line ();
7201 if (md.qp.X_op == O_register)
7203 as_bad ("Tag must come before qualifying predicate.");
7207 /* This implements just enough of read_a_source_file in read.c to
7208 recognize labels. */
7209 if (is_name_beginner (*input_line_pointer))
7211 s = input_line_pointer;
7212 c = get_symbol_end ();
7214 else if (LOCAL_LABELS_FB
7215 && ISDIGIT (*input_line_pointer))
7218 while (ISDIGIT (*input_line_pointer))
7219 temp = (temp * 10) + *input_line_pointer++ - '0';
7220 fb_label_instance_inc (temp);
7221 s = fb_label_name (temp, 0);
7222 c = *input_line_pointer;
7231 /* Put ':' back for error messages' sake. */
7232 *input_line_pointer++ = ':';
7233 as_bad ("Expected ':'");
7240 /* Put ':' back for error messages' sake. */
7241 *input_line_pointer++ = ':';
7242 if (*input_line_pointer++ != ']')
7244 as_bad ("Expected ']'");
7249 as_bad ("Tag name expected");
7259 /* Not a valid line. */
7264 ia64_frob_label (sym)
7267 struct label_fix *fix;
7269 /* Tags need special handling since they are not bundle breaks like
7273 fix = obstack_alloc (¬es, sizeof (*fix));
7275 fix->next = CURR_SLOT.tag_fixups;
7276 CURR_SLOT.tag_fixups = fix;
7281 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7283 md.last_text_seg = now_seg;
7284 fix = obstack_alloc (¬es, sizeof (*fix));
7286 fix->next = CURR_SLOT.label_fixups;
7287 CURR_SLOT.label_fixups = fix;
7289 /* Keep track of how many code entry points we've seen. */
7290 if (md.path == md.maxpaths)
7293 md.entry_labels = (const char **)
7294 xrealloc ((void *) md.entry_labels,
7295 md.maxpaths * sizeof (char *));
7297 md.entry_labels[md.path++] = S_GET_NAME (sym);
7302 /* The HP-UX linker will give unresolved symbol errors for symbols
7303 that are declared but unused. This routine removes declared,
7304 unused symbols from an object. */
7306 ia64_frob_symbol (sym)
7309 if ((S_GET_SEGMENT (sym) == &bfd_und_section && ! symbol_used_p (sym) &&
7310 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7311 || (S_GET_SEGMENT (sym) == &bfd_abs_section
7312 && ! S_IS_EXTERNAL (sym)))
7319 ia64_flush_pending_output ()
7321 if (!md.keep_pending_output
7322 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7324 /* ??? This causes many unnecessary stop bits to be emitted.
7325 Unfortunately, it isn't clear if it is safe to remove this. */
7326 insn_group_break (1, 0, 0);
7327 ia64_flush_insns ();
7331 /* Do ia64-specific expression optimization. All that's done here is
7332 to transform index expressions that are either due to the indexing
7333 of rotating registers or due to the indexing of indirect register
7336 ia64_optimize_expr (l, op, r)
7345 if (l->X_op == O_register && r->X_op == O_constant)
7347 num_regs = (l->X_add_number >> 16);
7348 if ((unsigned) r->X_add_number >= num_regs)
7351 as_bad ("No current frame");
7353 as_bad ("Index out of range 0..%u", num_regs - 1);
7354 r->X_add_number = 0;
7356 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
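	  /* E.g., for a rotating register set whose base is r32, <name>[2]
	     now denotes register r34.  */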
7359 else if (l->X_op == O_register && r->X_op == O_register)
7361 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
7362 || l->X_add_number == IND_MEM)
7364 as_bad ("Indirect register set name expected");
7365 l->X_add_number = IND_CPUID;
7368 l->X_op_symbol = md.regsym[l->X_add_number];
7369 l->X_add_number = r->X_add_number;
7377 ia64_parse_name (name, e)
7381 struct const_desc *cdesc;
7382 struct dynreg *dr = 0;
7383 unsigned int regnum;
7387 /* first see if NAME is a known register name: */
7388 sym = hash_find (md.reg_hash, name);
7391 e->X_op = O_register;
7392 e->X_add_number = S_GET_VALUE (sym);
7396 cdesc = hash_find (md.const_hash, name);
7399 e->X_op = O_constant;
7400 e->X_add_number = cdesc->value;
7404 /* check for inN, locN, or outN: */
7408 if (name[1] == 'n' && ISDIGIT (name[2]))
7416 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7424 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
7437 /* The name is inN, locN, or outN; parse the register number. */
7438 regnum = strtoul (name, &end, 10);
7439 if (end > name && *end == '\0')
7441 if ((unsigned) regnum >= dr->num_regs)
7444 as_bad ("No current frame");
7446 as_bad ("Register number out of range 0..%u",
7450 e->X_op = O_register;
7451 e->X_add_number = dr->base + regnum;
7456 if ((dr = hash_find (md.dynreg_hash, name)))
7458 /* We've got ourselves the name of a rotating register set.
7459 Store the base register number in the low 16 bits of
7460 X_add_number and the size of the register set in the top 16
7462 e->X_op = O_register;
7463 e->X_add_number = dr->base | (dr->num_regs << 16);
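      /* ia64_optimize_expr () unpacks this base/size encoding again when the
	 set name is indexed.  */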
7469 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
7472 ia64_canonicalize_symbol_name (name)
7475 size_t len = strlen (name);
7476 if (len > 1 && name[len - 1] == '#')
7477 name[len - 1] = '\0';
7481 /* Return true if idesc is a conditional branch instruction. This excludes
7482 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
7483 because they always read/write resources regardless of the value of the
7484 qualifying predicate. br.ia must always use p0, and hence is always
7485 taken. Thus this function returns true for branches which can fall
7486 through, and which use no resources if they do fall through. */
7489 is_conditional_branch (idesc)
7490 struct ia64_opcode *idesc;
7492 /* br is a conditional branch. Everything that starts with br. except
7493 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
7494 Everything that starts with brl is a conditional branch. */
7495 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
7496 && (idesc->name[2] == '\0'
7497 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
7498 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
7499 || idesc->name[2] == 'l'
7500 /* br.cond, br.call, br.clr */
7501 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
7502 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
7503 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
7506 /* Return whether the given opcode is a taken branch. If there's any doubt,
7510 is_taken_branch (idesc)
7511 struct ia64_opcode *idesc;
7513 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
7514 || strncmp (idesc->name, "br.ia", 5) == 0);
7517 /* Return whether the given opcode is an interruption or rfi. If there's any
7518 doubt, returns zero. */
7521 is_interruption_or_rfi (idesc)
7522 struct ia64_opcode *idesc;
7524 if (strcmp (idesc->name, "rfi") == 0)
7529 /* Returns the index of the given dependency in the opcode's list of chks, or
7530 -1 if there is no dependency. */
7533 depends_on (depind, idesc)
7535 struct ia64_opcode *idesc;
7538 const struct ia64_opcode_dependency *dep = idesc->dependencies;
7539 for (i = 0; i < dep->nchks; i++)
7541 if (depind == DEP (dep->chks[i]))
7547 /* Determine a set of specific resources used for a particular resource
7548   class.  Returns the number of specific resources identified.  For those
7549 cases which are not determinable statically, the resource returned is
7552 Meanings of value in 'NOTE':
7553 1) only read/write when the register number is explicitly encoded in the
7555 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
7556 accesses CFM when qualifying predicate is in the rotating region.
7557 3) general register value is used to specify an indirect register; not
7558 determinable statically.
7559 4) only read the given resource when bits 7:0 of the indirect index
7560      register value do not match the register number of the resource; not
7561 determinable statically.
7562 5) all rules are implementation specific.
7563 6) only when both the index specified by the reader and the index specified
7564 by the writer have the same value in bits 63:61; not determinable
7566 7) only access the specified resource when the corresponding mask bit is
7568 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
7569      only read when these insns reference FR2-31.
7570 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
7571      written when these insns write FR32-127.
7572 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
7574 11) The target predicates are written independently of PR[qp], but source
7575 registers are only read if PR[qp] is true. Since the state of PR[qp]
7576 cannot statically be determined, all source registers are marked used.
7577 12) This insn only reads the specified predicate register when that
7578 register is the PR[qp].
7579   13) This reference to ld-c only applies to the GR whose value is loaded
7580 with data returned from memory, not the post-incremented address register.
7581 14) The RSE resource includes the implementation-specific RSE internal
7582 state resources. At least one (and possibly more) of these resources are
7583 read by each instruction listed in IC:rse-readers. At least one (and
7584 possibly more) of these resources are written by each insn listed in
7586 15+16) Represents reserved instructions, which the assembler does not
7589 Memory resources (i.e. locations in memory) are *not* marked or tracked by
7590 this code; there are no dependency violations based on memory access.
7593 #define MAX_SPECS 256
7598 specify_resource (dep, idesc, type, specs, note, path)
7599 const struct ia64_dependency *dep;
7600 struct ia64_opcode *idesc;
7601 int type; /* is this a DV chk or a DV reg? */
7602 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
7603 int note; /* resource note for this insn's usage */
7604 int path; /* which execution path to examine */
7611 if (dep->mode == IA64_DV_WAW
7612 || (dep->mode == IA64_DV_RAW && type == DV_REG)
7613 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
7616 /* template for any resources we identify */
7617 tmpl.dependency = dep;
7619 tmpl.insn_srlz = tmpl.data_srlz = 0;
7620 tmpl.qp_regno = CURR_SLOT.qp_regno;
7621 tmpl.link_to_qp_branch = 1;
7622 tmpl.mem_offset.hint = 0;
7625 tmpl.cmp_type = CMP_NONE;
7628 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
7629 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
7630 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
7632 /* we don't need to track these */
7633 if (dep->semantics == IA64_DVS_NONE)
7636 switch (dep->specifier)
7641 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7643 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7644 if (regno >= 0 && regno <= 7)
7646 specs[count] = tmpl;
7647 specs[count++].index = regno;
7653 for (i = 0; i < 8; i++)
7655 specs[count] = tmpl;
7656 specs[count++].index = i;
7665 case IA64_RS_AR_UNAT:
7666 /* This is a mov =AR or mov AR= instruction. */
7667 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7669 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7670 if (regno == AR_UNAT)
7672 specs[count++] = tmpl;
7677 /* This is a spill/fill, or other instruction that modifies the
7680 /* Unless we can determine the specific bits used, mark the whole
7681 thing; bits 8:3 of the memory address indicate the bit used in
7682 UNAT. The .mem.offset hint may be used to eliminate a small
7683 subset of conflicts. */
7684 specs[count] = tmpl;
7685 if (md.mem_offset.hint)
7688 fprintf (stderr, " Using hint for spill/fill\n");
7689 /* The index isn't actually used, just set it to something
7690 approximating the bit index. */
7691 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
7692 specs[count].mem_offset.hint = 1;
7693 specs[count].mem_offset.offset = md.mem_offset.offset;
7694 specs[count++].mem_offset.base = md.mem_offset.base;
7698 specs[count++].specific = 0;
7706 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7708 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7709 if ((regno >= 8 && regno <= 15)
7710 || (regno >= 20 && regno <= 23)
7711 || (regno >= 31 && regno <= 39)
7712 || (regno >= 41 && regno <= 47)
7713 || (regno >= 67 && regno <= 111))
7715 specs[count] = tmpl;
7716 specs[count++].index = regno;
7729 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7731 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7732 if ((regno >= 48 && regno <= 63)
7733 || (regno >= 112 && regno <= 127))
7735 specs[count] = tmpl;
7736 specs[count++].index = regno;
7742 for (i = 48; i < 64; i++)
7744 specs[count] = tmpl;
7745 specs[count++].index = i;
7747 for (i = 112; i < 128; i++)
7749 specs[count] = tmpl;
7750 specs[count++].index = i;
7768 for (i = 0; i < idesc->num_outputs; i++)
7769 if (idesc->operands[i] == IA64_OPND_B1
7770 || idesc->operands[i] == IA64_OPND_B2)
7772 specs[count] = tmpl;
7773 specs[count++].index =
7774 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7779 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7780 if (idesc->operands[i] == IA64_OPND_B1
7781 || idesc->operands[i] == IA64_OPND_B2)
7783 specs[count] = tmpl;
7784 specs[count++].index =
7785 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7791 case IA64_RS_CPUID: /* four or more registers */
7794 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
7796 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7797 if (regno >= 0 && regno < NELEMS (gr_values)
7800 specs[count] = tmpl;
7801 specs[count++].index = gr_values[regno].value & 0xFF;
7805 specs[count] = tmpl;
7806 specs[count++].specific = 0;
7816 case IA64_RS_DBR: /* four or more registers */
7819 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
7821 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7822 if (regno >= 0 && regno < NELEMS (gr_values)
7825 specs[count] = tmpl;
7826 specs[count++].index = gr_values[regno].value & 0xFF;
7830 specs[count] = tmpl;
7831 specs[count++].specific = 0;
7835 else if (note == 0 && !rsrc_write)
7837 specs[count] = tmpl;
7838 specs[count++].specific = 0;
7846 case IA64_RS_IBR: /* four or more registers */
7849 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
7851 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7852 if (regno >= 0 && regno < NELEMS (gr_values)
7855 specs[count] = tmpl;
7856 specs[count++].index = gr_values[regno].value & 0xFF;
7860 specs[count] = tmpl;
7861 specs[count++].specific = 0;
7874 /* These are implementation specific. Force all references to
7875 conflict with all other references. */
7876 specs[count] = tmpl;
7877 specs[count++].specific = 0;
7885 case IA64_RS_PKR: /* 16 or more registers */
7886 if (note == 3 || note == 4)
7888 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
7890 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7891 if (regno >= 0 && regno < NELEMS (gr_values)
7896 specs[count] = tmpl;
7897 specs[count++].index = gr_values[regno].value & 0xFF;
7900 for (i = 0; i < NELEMS (gr_values); i++)
7902 /* Uses all registers *except* the one in R3. */
7903 if ((unsigned)i != (gr_values[regno].value & 0xFF))
7905 specs[count] = tmpl;
7906 specs[count++].index = i;
7912 specs[count] = tmpl;
7913 specs[count++].specific = 0;
7920 specs[count] = tmpl;
7921 specs[count++].specific = 0;
7925 case IA64_RS_PMC: /* four or more registers */
7928 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
7929 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
7932 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
7934 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
7935 if (regno >= 0 && regno < NELEMS (gr_values)
7938 specs[count] = tmpl;
7939 specs[count++].index = gr_values[regno].value & 0xFF;
7943 specs[count] = tmpl;
7944 specs[count++].specific = 0;
7954 case IA64_RS_PMD: /* four or more registers */
7957 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
7959 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7960 if (regno >= 0 && regno < NELEMS (gr_values)
7963 specs[count] = tmpl;
7964 specs[count++].index = gr_values[regno].value & 0xFF;
7968 specs[count] = tmpl;
7969 specs[count++].specific = 0;
7979 case IA64_RS_RR: /* eight registers */
7982 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
7984 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7985 if (regno >= 0 && regno < NELEMS (gr_values)
7988 specs[count] = tmpl;
7989 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
7993 specs[count] = tmpl;
7994 specs[count++].specific = 0;
7998 else if (note == 0 && !rsrc_write)
8000 specs[count] = tmpl;
8001 specs[count++].specific = 0;
8009 case IA64_RS_CR_IRR:
8012 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8013 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8015 && idesc->operands[1] == IA64_OPND_CR3
8018 for (i = 0; i < 4; i++)
8020 specs[count] = tmpl;
8021 specs[count++].index = CR_IRR0 + i;
8027 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8028 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8030 && regno <= CR_IRR3)
8032 specs[count] = tmpl;
8033 specs[count++].index = regno;
8042 case IA64_RS_CR_LRR:
8049 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8050 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8051 && (regno == CR_LRR0 || regno == CR_LRR1))
8053 specs[count] = tmpl;
8054 specs[count++].index = regno;
8062 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8064 specs[count] = tmpl;
8065 specs[count++].index =
8066 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8081 else if (rsrc_write)
8083 if (dep->specifier == IA64_RS_FRb
8084 && idesc->operands[0] == IA64_OPND_F1)
8086 specs[count] = tmpl;
8087 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8092 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8094 if (idesc->operands[i] == IA64_OPND_F2
8095 || idesc->operands[i] == IA64_OPND_F3
8096 || idesc->operands[i] == IA64_OPND_F4)
8098 specs[count] = tmpl;
8099 specs[count++].index =
8100 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8109 /* This reference applies only to the GR whose value is loaded with
8110 data returned from memory. */
8111 specs[count] = tmpl;
8112 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8118 for (i = 0; i < idesc->num_outputs; i++)
8119 if (idesc->operands[i] == IA64_OPND_R1
8120 || idesc->operands[i] == IA64_OPND_R2
8121 || idesc->operands[i] == IA64_OPND_R3)
8123 specs[count] = tmpl;
8124 specs[count++].index =
8125 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8127 if (idesc->flags & IA64_OPCODE_POSTINC)
8128 for (i = 0; i < NELEMS (idesc->operands); i++)
8129 if (idesc->operands[i] == IA64_OPND_MR3)
8131 specs[count] = tmpl;
8132 specs[count++].index =
8133 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8138 /* Look for anything that reads a GR. */
8139 for (i = 0; i < NELEMS (idesc->operands); i++)
8141 if (idesc->operands[i] == IA64_OPND_MR3
8142 || idesc->operands[i] == IA64_OPND_CPUID_R3
8143 || idesc->operands[i] == IA64_OPND_DBR_R3
8144 || idesc->operands[i] == IA64_OPND_IBR_R3
8145 || idesc->operands[i] == IA64_OPND_MSR_R3
8146 || idesc->operands[i] == IA64_OPND_PKR_R3
8147 || idesc->operands[i] == IA64_OPND_PMC_R3
8148 || idesc->operands[i] == IA64_OPND_PMD_R3
8149 || idesc->operands[i] == IA64_OPND_RR_R3
8150 || ((i >= idesc->num_outputs)
8151 && (idesc->operands[i] == IA64_OPND_R1
8152 || idesc->operands[i] == IA64_OPND_R2
8153 || idesc->operands[i] == IA64_OPND_R3
8154 /* addl source register. */
8155 || idesc->operands[i] == IA64_OPND_R3_2)))
8157 specs[count] = tmpl;
8158 specs[count++].index =
8159 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8170 /* This is the same as IA64_RS_PRr, except that the register range is
8171 from 1 - 15, and there are no rotating register reads/writes here. */
8175 for (i = 1; i < 16; i++)
8177 specs[count] = tmpl;
8178 specs[count++].index = i;
8184 /* Mark only those registers indicated by the mask. */
8187 mask = CURR_SLOT.opnd[2].X_add_number;
8188 for (i = 1; i < 16; i++)
8189 if (mask & ((valueT) 1 << i))
8191 specs[count] = tmpl;
8192 specs[count++].index = i;
8200 else if (note == 11) /* note 11 implies note 1 as well */
8204 for (i = 0; i < idesc->num_outputs; i++)
8206 if (idesc->operands[i] == IA64_OPND_P1
8207 || idesc->operands[i] == IA64_OPND_P2)
8209 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8210 if (regno >= 1 && regno < 16)
8212 specs[count] = tmpl;
8213 specs[count++].index = regno;
8223 else if (note == 12)
8225 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8227 specs[count] = tmpl;
8228 specs[count++].index = CURR_SLOT.qp_regno;
8235 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8236 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8237 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8238 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8240 if ((idesc->operands[0] == IA64_OPND_P1
8241 || idesc->operands[0] == IA64_OPND_P2)
8242 && p1 >= 1 && p1 < 16)
8244 specs[count] = tmpl;
8245 specs[count].cmp_type =
8246 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8247 specs[count++].index = p1;
8249 if ((idesc->operands[1] == IA64_OPND_P1
8250 || idesc->operands[1] == IA64_OPND_P2)
8251 && p2 >= 1 && p2 < 16)
8253 specs[count] = tmpl;
8254 specs[count].cmp_type =
8255 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8256 specs[count++].index = p2;
8261 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8263 specs[count] = tmpl;
8264 specs[count++].index = CURR_SLOT.qp_regno;
8266 if (idesc->operands[1] == IA64_OPND_PR)
8268 for (i = 1; i < 16; i++)
8270 specs[count] = tmpl;
8271 specs[count++].index = i;
8282 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8283 simplified cases of this. */
8287 for (i = 16; i < 63; i++)
8289 specs[count] = tmpl;
8290 specs[count++].index = i;
8296 /* Mark only those registers indicated by the mask. */
8298 && idesc->operands[0] == IA64_OPND_PR)
8300 mask = CURR_SLOT.opnd[2].X_add_number;
8301 if (mask & ((valueT) 1 << 16))
8302 for (i = 16; i < 63; i++)
8304 specs[count] = tmpl;
8305 specs[count++].index = i;
8309 && idesc->operands[0] == IA64_OPND_PR_ROT)
8311 for (i = 16; i < 63; i++)
8313 specs[count] = tmpl;
8314 specs[count++].index = i;
8322 else if (note == 11) /* note 11 implies note 1 as well */
8326 for (i = 0; i < idesc->num_outputs; i++)
8328 if (idesc->operands[i] == IA64_OPND_P1
8329 || idesc->operands[i] == IA64_OPND_P2)
8331 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8332 if (regno >= 16 && regno < 63)
8334 specs[count] = tmpl;
8335 specs[count++].index = regno;
8345 else if (note == 12)
8347 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8349 specs[count] = tmpl;
8350 specs[count++].index = CURR_SLOT.qp_regno;
8357 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8358 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8359 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8360 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8362 if ((idesc->operands[0] == IA64_OPND_P1
8363 || idesc->operands[0] == IA64_OPND_P2)
8364 && p1 >= 16 && p1 < 63)
8366 specs[count] = tmpl;
8367 specs[count].cmp_type =
8368 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8369 specs[count++].index = p1;
8371 if ((idesc->operands[1] == IA64_OPND_P1
8372 || idesc->operands[1] == IA64_OPND_P2)
8373 && p2 >= 16 && p2 < 63)
8375 specs[count] = tmpl;
8376 specs[count].cmp_type =
8377 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8378 specs[count++].index = p2;
8383 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8385 specs[count] = tmpl;
8386 specs[count++].index = CURR_SLOT.qp_regno;
8388 if (idesc->operands[1] == IA64_OPND_PR)
8390 for (i = 16; i < 63; i++)
8392 specs[count] = tmpl;
8393 specs[count++].index = i;
8405 /* Verify that the instruction is using the PSR bit indicated in
8409 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
8411 if (dep->regindex < 6)
8413 specs[count++] = tmpl;
8416 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
8418 if (dep->regindex < 32
8419 || dep->regindex == 35
8420 || dep->regindex == 36
8421 || (!rsrc_write && dep->regindex == PSR_CPL))
8423 specs[count++] = tmpl;
8426 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
8428 if (dep->regindex < 32
8429 || dep->regindex == 35
8430 || dep->regindex == 36
8431 || (rsrc_write && dep->regindex == PSR_CPL))
8433 specs[count++] = tmpl;
8438 /* Several PSR bits have very specific dependencies. */
8439 switch (dep->regindex)
8442 specs[count++] = tmpl;
8447 specs[count++] = tmpl;
8451 /* Only certain CR accesses use PSR.ic */
8452 if (idesc->operands[0] == IA64_OPND_CR3
8453 || idesc->operands[1] == IA64_OPND_CR3)
8456 ((idesc->operands[0] == IA64_OPND_CR3)
8459 CURR_SLOT.opnd[index].X_add_number - REG_CR;
8474 specs[count++] = tmpl;
8483 specs[count++] = tmpl;
8487 /* Only some AR accesses use cpl */
8488 if (idesc->operands[0] == IA64_OPND_AR3
8489 || idesc->operands[1] == IA64_OPND_AR3)
8492 ((idesc->operands[0] == IA64_OPND_AR3)
8495 CURR_SLOT.opnd[index].X_add_number - REG_AR;
8502 && regno <= AR_K7))))
8504 specs[count++] = tmpl;
8509 specs[count++] = tmpl;
8519 if (idesc->operands[0] == IA64_OPND_IMMU24)
8521 mask = CURR_SLOT.opnd[0].X_add_number;
8527 if (mask & ((valueT) 1 << dep->regindex))
8529 specs[count++] = tmpl;
8534 int min = dep->regindex == PSR_DFL ? 2 : 32;
8535 int max = dep->regindex == PSR_DFL ? 31 : 127;
8536 /* dfh is read on FR32-127; dfl is read on FR2-31 */
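/* An illustrative case (a sketch, not taken from the sources): "fma f6 = f40, f7, f8"
   uses f6-f8 (FR2-31, so PSR.dfl is read) as well as f40 (FR32-127, so
   PSR.dfh is read); whichever of the two this dependency names, a spec is
   recorded for it by the loop below.  */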
8537 for (i = 0; i < NELEMS (idesc->operands); i++)
8539 if (idesc->operands[i] == IA64_OPND_F1
8540 || idesc->operands[i] == IA64_OPND_F2
8541 || idesc->operands[i] == IA64_OPND_F3
8542 || idesc->operands[i] == IA64_OPND_F4)
8544 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8545 if (reg >= min && reg <= max)
8547 specs[count++] = tmpl;
8554 int min = dep->regindex == PSR_MFL ? 2 : 32;
8555 int max = dep->regindex == PSR_MFL ? 31 : 127;
8556 /* mfh is read on writes to FR32-127; mfl is read on writes to
8558 for (i = 0; i < idesc->num_outputs; i++)
8560 if (idesc->operands[i] == IA64_OPND_F1)
8562 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8563 if (reg >= min && reg <= max)
8565 specs[count++] = tmpl;
8570 else if (note == 10)
8572 for (i = 0; i < NELEMS (idesc->operands); i++)
8574 if (idesc->operands[i] == IA64_OPND_R1
8575 || idesc->operands[i] == IA64_OPND_R2
8576 || idesc->operands[i] == IA64_OPND_R3)
8578 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8579 if (regno >= 16 && regno <= 31)
8581 specs[count++] = tmpl;
8592 case IA64_RS_AR_FPSR:
8593 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8595 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8596 if (regno == AR_FPSR)
8598 specs[count++] = tmpl;
8603 specs[count++] = tmpl;
8608 /* Handle all AR[REG] resources */
8609 if (note == 0 || note == 1)
8611 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8612 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
8613 && regno == dep->regindex)
8615 specs[count++] = tmpl;
8617 /* other AR[REG] resources may be affected by AR accesses */
8618 else if (idesc->operands[0] == IA64_OPND_AR3)
8621 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
8622 switch (dep->regindex)
8628 if (regno == AR_BSPSTORE)
8630 specs[count++] = tmpl;
8634 (regno == AR_BSPSTORE
8635 || regno == AR_RNAT))
8637 specs[count++] = tmpl;
8642 else if (idesc->operands[1] == IA64_OPND_AR3)
8645 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
8646 switch (dep->regindex)
8651 if (regno == AR_BSPSTORE || regno == AR_RNAT)
8653 specs[count++] = tmpl;
8660 specs[count++] = tmpl;
8670 /* Handle all CR[REG] resources */
8671 if (note == 0 || note == 1)
8673 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8675 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8676 if (regno == dep->regindex)
8678 specs[count++] = tmpl;
8680 else if (!rsrc_write)
8682 /* Reads from CR[IVR] affect other resources. */
8683 if (regno == CR_IVR)
8685 if ((dep->regindex >= CR_IRR0
8686 && dep->regindex <= CR_IRR3)
8687 || dep->regindex == CR_TPR)
8689 specs[count++] = tmpl;
8696 specs[count++] = tmpl;
8705 case IA64_RS_INSERVICE:
8706 /* look for write of EOI (67) or read of IVR (65) */
8707 if ((idesc->operands[0] == IA64_OPND_CR3
8708 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
8709 || (idesc->operands[1] == IA64_OPND_CR3
8710 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
8712 specs[count++] = tmpl;
8719 specs[count++] = tmpl;
8730 specs[count++] = tmpl;
8734 /* Check if any of the registers accessed are in the rotating region.
8735 mov to/from pr accesses CFM only when qp_regno is in the rotating
8737 for (i = 0; i < NELEMS (idesc->operands); i++)
8739 if (idesc->operands[i] == IA64_OPND_R1
8740 || idesc->operands[i] == IA64_OPND_R2
8741 || idesc->operands[i] == IA64_OPND_R3)
8743 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8744 /* Assumes that md.rot.num_regs is always valid */
8745 if (md.rot.num_regs > 0
8747 && num < 31 + md.rot.num_regs)
8749 specs[count] = tmpl;
8750 specs[count++].specific = 0;
8753 else if (idesc->operands[i] == IA64_OPND_F1
8754 || idesc->operands[i] == IA64_OPND_F2
8755 || idesc->operands[i] == IA64_OPND_F3
8756 || idesc->operands[i] == IA64_OPND_F4)
8758 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8761 specs[count] = tmpl;
8762 specs[count++].specific = 0;
8765 else if (idesc->operands[i] == IA64_OPND_P1
8766 || idesc->operands[i] == IA64_OPND_P2)
8768 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
8771 specs[count] = tmpl;
8772 specs[count++].specific = 0;
8776 if (CURR_SLOT.qp_regno > 15)
8778 specs[count] = tmpl;
8779 specs[count++].specific = 0;
8784 /* This is the same as IA64_RS_PRr, except simplified to account for
8785 the fact that there is only one register. */
8789 specs[count++] = tmpl;
8794 if (idesc->operands[2] == IA64_OPND_IMM17)
8795 mask = CURR_SLOT.opnd[2].X_add_number;
8796 if (mask & ((valueT) 1 << 63))
8797 specs[count++] = tmpl;
8799 else if (note == 11)
8801 if ((idesc->operands[0] == IA64_OPND_P1
8802 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
8803 || (idesc->operands[1] == IA64_OPND_P2
8804 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
8806 specs[count++] = tmpl;
8809 else if (note == 12)
8811 if (CURR_SLOT.qp_regno == 63)
8813 specs[count++] = tmpl;
8820 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8821 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8822 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8823 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8826 && (idesc->operands[0] == IA64_OPND_P1
8827 || idesc->operands[0] == IA64_OPND_P2))
8829 specs[count] = tmpl;
8830 specs[count++].cmp_type =
8831 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8834 && (idesc->operands[1] == IA64_OPND_P1
8835 || idesc->operands[1] == IA64_OPND_P2))
8837 specs[count] = tmpl;
8838 specs[count++].cmp_type =
8839 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8844 if (CURR_SLOT.qp_regno == 63)
8846 specs[count++] = tmpl;
8857 /* FIXME we can identify some individual RSE written resources, but RSE
8858 read resources have not yet been completely identified, so for now
8859 treat RSE as a single resource */
8860 if (strncmp (idesc->name, "mov", 3) == 0)
8864 if (idesc->operands[0] == IA64_OPND_AR3
8865 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
8867 specs[count] = tmpl;
8868 specs[count++].index = 0; /* IA64_RSE_BSPLOAD/RNATBITINDEX */
8873 if (idesc->operands[0] == IA64_OPND_AR3)
8875 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
8876 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
8878 specs[count++] = tmpl;
8881 else if (idesc->operands[1] == IA64_OPND_AR3)
8883 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
8884 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
8885 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
8887 specs[count++] = tmpl;
8894 specs[count++] = tmpl;
8899 /* FIXME -- do any of these need to be non-specific? */
8900 specs[count++] = tmpl;
8904 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
8911 /* Clear branch flags on marked resources. This breaks the link between the
8912 QP of the marking instruction and a subsequent branch on the same QP. */
8915 clear_qp_branch_flag (mask)
8919 for (i = 0; i < regdepslen; i++)
8921 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
8922 if ((bit & mask) != 0)
8924 regdeps[i].link_to_qp_branch = 0;
8929 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
8930 any mutexes which contain one of the PRs and create new ones when
8934 update_qp_mutex (valueT mask)
8940 while (i < qp_mutexeslen)
8942 if ((qp_mutexes[i].prmask & mask) != 0)
8944 /* If it destroys and creates the same mutex, do nothing. */
8945 if (qp_mutexes[i].prmask == mask
8946 && qp_mutexes[i].path == md.path)
8957 fprintf (stderr, " Clearing mutex relation");
8958 print_prmask (qp_mutexes[i].prmask);
8959 fprintf (stderr, "\n");
8962 /* Deal with an old mutex of 3 or more PRs only if
8963 the new mutex is on the same execution path as it.
8965 FIXME: The 3+ mutex support is incomplete.
8966 dot_pred_rel () may be a better place to fix it. */
8967 if (qp_mutexes[i].path == md.path)
8969 /* If it is a proper subset of the mutex, create a
8972 && (qp_mutexes[i].prmask & mask) == mask)
8975 qp_mutexes[i].prmask &= ~mask;
8976 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
8978 /* Modify the mutex if there are more than one
8986 /* Remove the mutex. */
8987 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
8995 add_qp_mutex (mask);
9000 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9002 Any change to a PR clears the mutex relations which include that PR. */
9005 clear_qp_mutex (mask)
9011 while (i < qp_mutexeslen)
9013 if ((qp_mutexes[i].prmask & mask) != 0)
9017 fprintf (stderr, " Clearing mutex relation");
9018 print_prmask (qp_mutexes[i].prmask);
9019 fprintf (stderr, "\n");
9021 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9028 /* Clear implies relations which contain PRs in the given masks.
9029 P1_MASK indicates the source of the implies relation, while P2_MASK
9030 indicates the implied PR. */
9033 clear_qp_implies (p1_mask, p2_mask)
9040 while (i < qp_implieslen)
9042 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9043 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9046 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9047 qp_implies[i].p1, qp_implies[i].p2);
9048 qp_implies[i] = qp_implies[--qp_implieslen];
9055 /* Add the PRs specified to the list of implied relations. */
9058 add_qp_imply (p1, p2)
9065 /* p0 is not meaningful here. */
9066 if (p1 == 0 || p2 == 0)
9072 /* If it exists already, ignore it. */
9073 for (i = 0; i < qp_implieslen; i++)
9075 if (qp_implies[i].p1 == p1
9076 && qp_implies[i].p2 == p2
9077 && qp_implies[i].path == md.path
9078 && !qp_implies[i].p2_branched)
9082 if (qp_implieslen == qp_impliestotlen)
9084 qp_impliestotlen += 20;
9085 qp_implies = (struct qp_imply *)
9086 xrealloc ((void *) qp_implies,
9087 qp_impliestotlen * sizeof (struct qp_imply));
9090 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9091 qp_implies[qp_implieslen].p1 = p1;
9092 qp_implies[qp_implieslen].p2 = p2;
9093 qp_implies[qp_implieslen].path = md.path;
9094 qp_implies[qp_implieslen++].p2_branched = 0;
9096 /* Add in the implied transitive relations; for everything that p2 implies,
9097 make p1 imply that, too; for everything that implies p1, make it imply p2
9099 for (i = 0; i < qp_implieslen; i++)
9101 if (qp_implies[i].p1 == p2)
9102 add_qp_imply (p1, qp_implies[i].p2);
9103 if (qp_implies[i].p2 == p1)
9104 add_qp_imply (qp_implies[i].p1, p2);
9106 /* Add in mutex relations implied by this implies relation; for each mutex
9107 relation containing p2, duplicate it and replace p2 with p1. */
9108 bit = (valueT) 1 << p1;
9109 mask = (valueT) 1 << p2;
9110 for (i = 0; i < qp_mutexeslen; i++)
9112 if (qp_mutexes[i].prmask & mask)
9113 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9117 /* Add the PRs specified in the mask to the mutex list; this means that only
9118 one of the PRs can be true at any time. PR0 should never be included in
9128 if (qp_mutexeslen == qp_mutexestotlen)
9130 qp_mutexestotlen += 20;
9131 qp_mutexes = (struct qpmutex *)
9132 xrealloc ((void *) qp_mutexes,
9133 qp_mutexestotlen * sizeof (struct qpmutex));
9137 fprintf (stderr, " Registering mutex on");
9138 print_prmask (mask);
9139 fprintf (stderr, "\n");
9141 qp_mutexes[qp_mutexeslen].path = md.path;
9142 qp_mutexes[qp_mutexeslen++].prmask = mask;
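/* Illustrative sketch (an assumed example, not from the sources): a parallel
   compare whose qualifying predicate is p0, e.g.

       cmp.eq p6, p7 = r8, r9

   leaves at most one of p6/p7 true, so add_qp_mutex is called with
   mask == ((valueT) 1 << 6) | ((valueT) 1 << 7) to record that relation
   for the current path.  */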
9146 has_suffix_p (name, suffix)
9150 size_t namelen = strlen (name);
9151 size_t sufflen = strlen (suffix);
9153 if (namelen <= sufflen)
9155 return strcmp (name + namelen - sufflen, suffix) == 0;
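/* Usage sketch (illustrative): has_suffix_p ("cmp.eq.or.andcm", ".or.andcm")
   is 1, while has_suffix_p ("cmp.eq", ".or.andcm") is 0 because the name is
   not longer than the suffix.  */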
9159 clear_register_values ()
9163 fprintf (stderr, " Clearing register values\n");
9164 for (i = 1; i < NELEMS (gr_values); i++)
9165 gr_values[i].known = 0;
9168 /* Keep track of register values/changes which affect DV tracking.
9170 optimization note: should add a flag to classes of insns where otherwise we
9171 have to examine a group of strings to identify them. */
9174 note_register_values (idesc)
9175 struct ia64_opcode *idesc;
9177 valueT qp_changemask = 0;
9180 /* Invalidate values for registers being written to. */
9181 for (i = 0; i < idesc->num_outputs; i++)
9183 if (idesc->operands[i] == IA64_OPND_R1
9184 || idesc->operands[i] == IA64_OPND_R2
9185 || idesc->operands[i] == IA64_OPND_R3)
9187 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9188 if (regno > 0 && regno < NELEMS (gr_values))
9189 gr_values[regno].known = 0;
9191 else if (idesc->operands[i] == IA64_OPND_R3_2)
9193 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9194 if (regno > 0 && regno < 4)
9195 gr_values[regno].known = 0;
9197 else if (idesc->operands[i] == IA64_OPND_P1
9198 || idesc->operands[i] == IA64_OPND_P2)
9200 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9201 qp_changemask |= (valueT) 1 << regno;
9203 else if (idesc->operands[i] == IA64_OPND_PR)
9205 if (idesc->operands[2] & (valueT) 0x10000)
9206 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9208 qp_changemask = idesc->operands[2];
9211 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9213 if (idesc->operands[1] & ((valueT) 1 << 43))
9214 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9216 qp_changemask = idesc->operands[1];
9217 qp_changemask &= ~(valueT) 0xFFFF;
9222 /* Always clear qp branch flags on any PR change. */
9223 /* FIXME there may be exceptions for certain compares. */
9224 clear_qp_branch_flag (qp_changemask);
9226 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9227 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9229 qp_changemask |= ~(valueT) 0xFFFF;
9230 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9232 for (i = 32; i < 32 + md.rot.num_regs; i++)
9233 gr_values[i].known = 0;
9235 clear_qp_mutex (qp_changemask);
9236 clear_qp_implies (qp_changemask, qp_changemask);
9238 /* After a call, all register values are undefined, except those marked
9240 else if (strncmp (idesc->name, "br.call", 6) == 0
9241 || strncmp (idesc->name, "brl.call", 7) == 0)
9243 /* FIXME keep GR values which are marked as "safe_across_calls" */
9244 clear_register_values ();
9245 clear_qp_mutex (~qp_safe_across_calls);
9246 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9247 clear_qp_branch_flag (~qp_safe_across_calls);
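/* Sketch of the intended interplay (assuming the .pred.safe_across_calls
   directive): e.g. ".pred.safe_across_calls p1-p5" sets bits 1-5 in
   qp_safe_across_calls, so mutex/implies information for those PRs survives
   the call above while everything else is dropped.  */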
9249 else if (is_interruption_or_rfi (idesc)
9250 || is_taken_branch (idesc))
9252 clear_register_values ();
9253 clear_qp_mutex (~(valueT) 0);
9254 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9256 /* Look for mutex and implies relations. */
9257 else if ((idesc->operands[0] == IA64_OPND_P1
9258 || idesc->operands[0] == IA64_OPND_P2)
9259 && (idesc->operands[1] == IA64_OPND_P1
9260 || idesc->operands[1] == IA64_OPND_P2))
9262 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9263 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9264 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9265 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9267 /* If both PRs are PR0, we can't really do anything. */
9268 if (p1 == 0 && p2 == 0)
9271 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9273 /* In general, clear mutexes and implies which include P1 or P2,
9274 with the following exceptions. */
9275 else if (has_suffix_p (idesc->name, ".or.andcm")
9276 || has_suffix_p (idesc->name, ".and.orcm"))
9278 clear_qp_implies (p2mask, p1mask);
9280 else if (has_suffix_p (idesc->name, ".andcm")
9281 || has_suffix_p (idesc->name, ".and"))
9283 clear_qp_implies (0, p1mask | p2mask);
9285 else if (has_suffix_p (idesc->name, ".orcm")
9286 || has_suffix_p (idesc->name, ".or"))
9288 clear_qp_mutex (p1mask | p2mask);
9289 clear_qp_implies (p1mask | p2mask, 0);
9295 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9297 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9298 if (p1 == 0 || p2 == 0)
9299 clear_qp_mutex (p1mask | p2mask);
9301 added = update_qp_mutex (p1mask | p2mask);
9303 if (CURR_SLOT.qp_regno == 0
9304 || has_suffix_p (idesc->name, ".unc"))
9306 if (added == 0 && p1 && p2)
9307 add_qp_mutex (p1mask | p2mask);
9308 if (CURR_SLOT.qp_regno != 0)
9311 add_qp_imply (p1, CURR_SLOT.qp_regno);
9313 add_qp_imply (p2, CURR_SLOT.qp_regno);
9318 /* Look for mov imm insns into GRs. */
9319 else if (idesc->operands[0] == IA64_OPND_R1
9320 && (idesc->operands[1] == IA64_OPND_IMM22
9321 || idesc->operands[1] == IA64_OPND_IMMU64)
9322 && (strcmp (idesc->name, "mov") == 0
9323 || strcmp (idesc->name, "movl") == 0))
9325 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9326 if (regno > 0 && regno < NELEMS (gr_values))
9328 gr_values[regno].known = 1;
9329 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9330 gr_values[regno].path = md.path;
9333 fprintf (stderr, " Know gr%d = ", regno);
9334 fprintf_vma (stderr, gr_values[regno].value);
9335 fputs ("\n", stderr);
9341 clear_qp_mutex (qp_changemask);
9342 clear_qp_implies (qp_changemask, qp_changemask);
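/* Example of why the value tracking above matters (a sketch, assuming this
   instruction sequence):

       mov r3 = 5 ;;
       mov r4 = pmc[r3]

   gr_values[3] is known to be 5 after the first insn, so specify_resource
   can pin the second insn to PMC[5] instead of treating the indirect access
   as non-specific.  */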
9346 /* Return whether the given predicate registers are currently mutex. */
9349 qp_mutex (p1, p2, path)
9359 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
9360 for (i = 0; i < qp_mutexeslen; i++)
9362 if (qp_mutexes[i].path >= path
9363 && (qp_mutexes[i].prmask & mask) == mask)
9370 /* Return whether the given resource is in the given insn's list of chks.
9371 Return 1 if the conflict is absolutely determined, 2 if it's a potential
9375 resources_match (rs, idesc, note, qp_regno, path)
9377 struct ia64_opcode *idesc;
9382 struct rsrc specs[MAX_SPECS];
9385 /* If the marked resource's qp_regno and the given qp_regno are mutex,
9386 we don't need to check. One exception is note 11, which indicates that
9387 target predicates are written regardless of PR[qp]. */
9388 if (qp_mutex (rs->qp_regno, qp_regno, path)
9392 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
9395 /* UNAT checking is a bit more specific than other resources */
9396 if (rs->dependency->specifier == IA64_RS_AR_UNAT
9397 && specs[count].mem_offset.hint
9398 && rs->mem_offset.hint)
9400 if (rs->mem_offset.base == specs[count].mem_offset.base)
9402 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
9403 ((specs[count].mem_offset.offset >> 3) & 0x3F))
9410 /* Skip apparent PR write conflicts where both writes are an AND or both
9411 writes are an OR. */
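/* e.g. (illustrative) two AND-type writers of the same PR in one instruction
   group, such as a pair of "cmp.eq.and p6, p0 = ..." compares, are
   architecturally allowed, so matching cmp_types are not reported as a
   conflict below.  */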
9412 if (rs->dependency->specifier == IA64_RS_PR
9413 || rs->dependency->specifier == IA64_RS_PRr
9414 || rs->dependency->specifier == IA64_RS_PR63)
9416 if (specs[count].cmp_type != CMP_NONE
9417 && specs[count].cmp_type == rs->cmp_type)
9420 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
9421 dv_mode[rs->dependency->mode],
9422 rs->dependency->specifier != IA64_RS_PR63 ?
9423 specs[count].index : 63);
9428 " %s on parallel compare conflict %s vs %s on PR%d\n",
9429 dv_mode[rs->dependency->mode],
9430 dv_cmp_type[rs->cmp_type],
9431 dv_cmp_type[specs[count].cmp_type],
9432 rs->dependency->specifier != IA64_RS_PR63 ?
9433 specs[count].index : 63);
9437 /* If either resource is not specific, conservatively assume a conflict
9439 if (!specs[count].specific || !rs->specific)
9441 else if (specs[count].index == rs->index)
9446 fprintf (stderr, " No %s conflicts\n", rs->dependency->name);
9452 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
9453 insert a stop to create the break. Update all resource dependencies
9454 appropriately. If QP_REGNO is non-zero, only apply the break to resources
9455 which use the same QP_REGNO and have the link_to_qp_branch flag set.
9456 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
9460 insn_group_break (insert_stop, qp_regno, save_current)
9467 if (insert_stop && md.num_slots_in_use > 0)
9468 PREV_SLOT.end_of_insn_group = 1;
9472 fprintf (stderr, " Insn group break%s",
9473 (insert_stop ? " (w/stop)" : ""));
9475 fprintf (stderr, " effective for QP=%d", qp_regno);
9476 fprintf (stderr, "\n");
9480 while (i < regdepslen)
9482 const struct ia64_dependency *dep = regdeps[i].dependency;
9485 && regdeps[i].qp_regno != qp_regno)
9492 && CURR_SLOT.src_file == regdeps[i].file
9493 && CURR_SLOT.src_line == regdeps[i].line)
9499 /* clear dependencies which are automatically cleared by a stop, or
9500 those that have reached the appropriate state of insn serialization */
9501 if (dep->semantics == IA64_DVS_IMPLIED
9502 || dep->semantics == IA64_DVS_IMPLIEDF
9503 || regdeps[i].insn_srlz == STATE_SRLZ)
9505 print_dependency ("Removing", i);
9506 regdeps[i] = regdeps[--regdepslen];
9510 if (dep->semantics == IA64_DVS_DATA
9511 || dep->semantics == IA64_DVS_INSTR
9512 || dep->semantics == IA64_DVS_SPECIFIC)
9514 if (regdeps[i].insn_srlz == STATE_NONE)
9515 regdeps[i].insn_srlz = STATE_STOP;
9516 if (regdeps[i].data_srlz == STATE_NONE)
9517 regdeps[i].data_srlz = STATE_STOP;
9524 /* Add the given resource usage spec to the list of active dependencies. */
9527 mark_resource (idesc, dep, spec, depind, path)
9528 struct ia64_opcode *idesc ATTRIBUTE_UNUSED;
9529 const struct ia64_dependency *dep ATTRIBUTE_UNUSED;
9534 if (regdepslen == regdepstotlen)
9536 regdepstotlen += 20;
9537 regdeps = (struct rsrc *)
9538 xrealloc ((void *) regdeps,
9539 regdepstotlen * sizeof (struct rsrc));
9542 regdeps[regdepslen] = *spec;
9543 regdeps[regdepslen].depind = depind;
9544 regdeps[regdepslen].path = path;
9545 regdeps[regdepslen].file = CURR_SLOT.src_file;
9546 regdeps[regdepslen].line = CURR_SLOT.src_line;
9548 print_dependency ("Adding", regdepslen);
9554 print_dependency (action, depind)
9560 fprintf (stderr, " %s %s '%s'",
9561 action, dv_mode[(regdeps[depind].dependency)->mode],
9562 (regdeps[depind].dependency)->name);
9563 if (regdeps[depind].specific && regdeps[depind].index != 0)
9564 fprintf (stderr, " (%d)", regdeps[depind].index);
9565 if (regdeps[depind].mem_offset.hint)
9567 fputs (" ", stderr);
9568 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
9569 fputs ("+", stderr);
9570 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
9572 fprintf (stderr, "\n");
9577 instruction_serialization ()
9581 fprintf (stderr, " Instruction serialization\n");
9582 for (i = 0; i < regdepslen; i++)
9583 if (regdeps[i].insn_srlz == STATE_STOP)
9584 regdeps[i].insn_srlz = STATE_SRLZ;
9588 data_serialization ()
9592 fprintf (stderr, " Data serialization\n");
9593 while (i < regdepslen)
9595 if (regdeps[i].data_srlz == STATE_STOP
9596 /* Note: as of 991210, all "other" dependencies are cleared by a
9597 data serialization. This might change with new tables */
9598 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
9600 print_dependency ("Removing", i);
9601 regdeps[i] = regdeps[--regdepslen];
9608 /* Insert stops and serializations as needed to avoid DVs. */
9611 remove_marked_resource (rs)
9614 switch (rs->dependency->semantics)
9616 case IA64_DVS_SPECIFIC:
9618 fprintf (stderr, "Implementation-specific, assume worst case...\n");
9619 /* ...fall through... */
9620 case IA64_DVS_INSTR:
9622 fprintf (stderr, "Inserting instr serialization\n");
9623 if (rs->insn_srlz < STATE_STOP)
9624 insn_group_break (1, 0, 0);
9625 if (rs->insn_srlz < STATE_SRLZ)
9627 struct slot oldslot = CURR_SLOT;
9628 /* Manually jam a srlz.i insn into the stream */
9629 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
9630 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
9631 instruction_serialization ();
9632 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9633 if (++md.num_slots_in_use >= NUM_SLOTS)
9635 CURR_SLOT = oldslot;
9637 insn_group_break (1, 0, 0);
9639 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
9640 "other" types of DV are eliminated
9641 by a data serialization */
9644 fprintf (stderr, "Inserting data serialization\n");
9645 if (rs->data_srlz < STATE_STOP)
9646 insn_group_break (1, 0, 0);
9648 struct slot oldslot = CURR_SLOT;
9649 /* Manually jam a srlz.d insn into the stream */
9650 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
9651 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
9652 data_serialization ();
9653 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9654 if (++md.num_slots_in_use >= NUM_SLOTS)
9656 CURR_SLOT = oldslot;
9659 case IA64_DVS_IMPLIED:
9660 case IA64_DVS_IMPLIEDF:
9662 fprintf (stderr, "Inserting stop\n");
9663 insn_group_break (1, 0, 0);
9670 /* Check the resources used by the given opcode against the current dependency
9673 The check is run once for each execution path encountered. In this case,
9674 a unique execution path is the sequence of instructions following a code
9675 entry point, e.g. the following has three execution paths, one starting
9676 at L0, one at L1, and one at L2.
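   A minimal sketch of such a layout (an assumed example):

       L0:     mov r4 = r5
       L1:     mov r6 = r7
       L2:     br.call b0 = bar

   Code reaching L2 may have entered at L0, L1, or L2, and each entry point
   is checked as its own path.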
9685 check_dependencies (idesc)
9686 struct ia64_opcode *idesc;
9688 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9692 /* Note that the number of marked resources may change within the
9693 loop if in auto mode. */
9695 while (i < regdepslen)
9697 struct rsrc *rs = &regdeps[i];
9698 const struct ia64_dependency *dep = rs->dependency;
9703 if (dep->semantics == IA64_DVS_NONE
9704 || (chkind = depends_on (rs->depind, idesc)) == -1)
9710 note = NOTE (opdeps->chks[chkind]);
9712 /* Check this resource against each execution path seen thus far. */
9713 for (path = 0; path <= md.path; path++)
9717 /* If the dependency wasn't on the path being checked, ignore it. */
9718 if (rs->path < path)
9721 /* If the QP for this insn implies a QP which has branched, don't
9722 bother checking. Ed. NOTE: I don't think this check is terribly
9723 useful; what's the point of generating code which will only be
9724 reached if its QP is zero?
9725 This code was specifically inserted to handle the following code,
9726 based on notes from Intel's DV checking code, where p1 implies p2.
9732 if (CURR_SLOT.qp_regno != 0)
9736 for (implies = 0; implies < qp_implieslen; implies++)
9738 if (qp_implies[implies].path >= path
9739 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
9740 && qp_implies[implies].p2_branched)
9750 if ((matchtype = resources_match (rs, idesc, note,
9751 CURR_SLOT.qp_regno, path)) != 0)
9754 char pathmsg[256] = "";
9755 char indexmsg[256] = "";
9756 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
9759 sprintf (pathmsg, " when entry is at label '%s'",
9760 md.entry_labels[path - 1]);
9761 if (rs->specific && rs->index != 0)
9762 sprintf (indexmsg, ", specific resource number is %d",
9764 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
9766 (certain ? "violates" : "may violate"),
9767 dv_mode[dep->mode], dep->name,
9768 dv_sem[dep->semantics],
9771 if (md.explicit_mode)
9773 as_warn ("%s", msg);
9775 as_warn (_("Only the first path encountering the conflict "
9777 as_warn_where (rs->file, rs->line,
9778 _("This is the location of the "
9779 "conflicting usage"));
9780 /* Don't bother checking other paths, to avoid duplicating
9787 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
9789 remove_marked_resource (rs);
9791 /* since the set of dependencies has changed, start over */
9792 /* FIXME -- since we're removing dvs as we go, we
9793 probably don't really need to start over... */
9806 /* Register new dependencies based on the given opcode. */
9809 mark_resources (idesc)
9810 struct ia64_opcode *idesc;
9813 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9814 int add_only_qp_reads = 0;
9816 /* A conditional branch only uses its resources if it is taken; if it is
9817 taken, we stop following that path. The other branch types effectively
9818 *always* write their resources. If it's not taken, register only QP
9820 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
9822 add_only_qp_reads = 1;
9826 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
9828 for (i = 0; i < opdeps->nregs; i++)
9830 const struct ia64_dependency *dep;
9831 struct rsrc specs[MAX_SPECS];
9836 dep = ia64_find_dependency (opdeps->regs[i]);
9837 note = NOTE (opdeps->regs[i]);
9839 if (add_only_qp_reads
9840 && !(dep->mode == IA64_DV_WAR
9841 && (dep->specifier == IA64_RS_PR
9842 || dep->specifier == IA64_RS_PRr
9843 || dep->specifier == IA64_RS_PR63)))
9846 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
9849 if (md.debug_dv && !count)
9850 fprintf (stderr, " No %s %s usage found (path %d)\n",
9851 dv_mode[dep->mode], dep->name, md.path);
9856 mark_resource (idesc, dep, &specs[count],
9857 DEP (opdeps->regs[i]), md.path);
9860 /* The execution path may affect register values, which may in turn
9861 affect which indirect-access resources are accessed. */
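/* e.g. (illustrative) for "mov r4 = rr[r3]" the region register touched
   depends on the value in r3, which may differ from one entry point to the
   next; hence the per-path re-specification below.  */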
9862 switch (dep->specifier)
9874 for (path = 0; path < md.path; path++)
9876 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
9878 mark_resource (idesc, dep, &specs[count],
9879 DEP (opdeps->regs[i]), path);
9886 /* Remove dependencies when they no longer apply. */
9889 update_dependencies (idesc)
9890 struct ia64_opcode *idesc;
9894 if (strcmp (idesc->name, "srlz.i") == 0)
9896 instruction_serialization ();
9898 else if (strcmp (idesc->name, "srlz.d") == 0)
9900 data_serialization ();
9902 else if (is_interruption_or_rfi (idesc)
9903 || is_taken_branch (idesc))
9905 /* Although technically the taken branch doesn't clear dependencies
9906 which require a srlz.[id], we don't follow the branch; the next
9907 instruction is assumed to start with a clean slate. */
9911 else if (is_conditional_branch (idesc)
9912 && CURR_SLOT.qp_regno != 0)
9914 int is_call = strstr (idesc->name, ".call") != NULL;
9916 for (i = 0; i < qp_implieslen; i++)
9918 /* If the conditional branch's predicate is implied by the predicate
9919 in an existing dependency, remove that dependency. */
9920 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
9923 /* Note that this implied predicate takes a branch so that if
9924 a later insn generates a DV but its predicate implies this
9925 one, we can avoid the false DV warning. */
9926 qp_implies[i].p2_branched = 1;
9927 while (depind < regdepslen)
9929 if (regdeps[depind].qp_regno == qp_implies[i].p1)
9931 print_dependency ("Removing", depind);
9932 regdeps[depind] = regdeps[--regdepslen];
9939 /* Any marked resources which have this same predicate should be
9940 cleared, provided that the QP hasn't been modified between the
9941 marking instruction and the branch. */
9944 insn_group_break (0, CURR_SLOT.qp_regno, 1);
9949 while (i < regdepslen)
9951 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
9952 && regdeps[i].link_to_qp_branch
9953 && (regdeps[i].file != CURR_SLOT.src_file
9954 || regdeps[i].line != CURR_SLOT.src_line))
9956 /* Treat like a taken branch */
9957 print_dependency ("Removing", i);
9958 regdeps[i] = regdeps[--regdepslen];
9967 /* Examine the current instruction for dependency violations. */
9971 struct ia64_opcode *idesc;
9975 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
9976 idesc->name, CURR_SLOT.src_line,
9977 idesc->dependencies->nchks,
9978 idesc->dependencies->nregs);
9981 /* Look through the list of currently marked resources; if the current
9982 instruction has the dependency in its chks list which uses that resource,
9983 check against the specific resources used. */
9984 check_dependencies (idesc);
9986 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
9987 then add them to the list of marked resources. */
9988 mark_resources (idesc);
9990 /* There are several types of dependency semantics, and each has its own
9991 requirements for being cleared
9993 Instruction serialization (insns separated by interruption, rfi, or
9994 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
9996 Data serialization (instruction serialization, or writer + srlz.d +
9997 reader, where writer and srlz.d are in separate groups) clears
9998 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
9999 always be the case).
10001 Instruction group break (groups separated by stop, taken branch,
10002 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10004 update_dependencies (idesc);
10006 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10007 warning. Keep track of as many as possible that are useful. */
10008 note_register_values (idesc);
10010 /* We don't need or want this anymore. */
10011 md.mem_offset.hint = 0;
10016 /* Translate one line of assembly. Pseudo ops and labels do not show
10022 char *saved_input_line_pointer, *mnemonic;
10023 const struct pseudo_opcode *pdesc;
10024 struct ia64_opcode *idesc;
10025 unsigned char qp_regno;
10026 unsigned int flags;
10029 saved_input_line_pointer = input_line_pointer;
10030 input_line_pointer = str;
10032 /* extract the opcode (mnemonic): */
10034 mnemonic = input_line_pointer;
10035 ch = get_symbol_end ();
10036 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10039 *input_line_pointer = ch;
10040 (*pdesc->handler) (pdesc->arg);
10044 /* Find the instruction descriptor matching the arguments. */
10046 idesc = ia64_find_opcode (mnemonic);
10047 *input_line_pointer = ch;
10050 as_bad ("Unknown opcode `%s'", mnemonic);
10054 idesc = parse_operands (idesc);
10058 /* Handle the dynamic ops we can handle now: */
10059 if (idesc->type == IA64_TYPE_DYN)
10061 if (strcmp (idesc->name, "add") == 0)
10063 if (CURR_SLOT.opnd[2].X_op == O_register
10064 && CURR_SLOT.opnd[2].X_add_number < 4)
10068 ia64_free_opcode (idesc);
10069 idesc = ia64_find_opcode (mnemonic);
10071 know (!idesc->next);
10074 else if (strcmp (idesc->name, "mov") == 0)
10076 enum ia64_opnd opnd1, opnd2;
10079 opnd1 = idesc->operands[0];
10080 opnd2 = idesc->operands[1];
10081 if (opnd1 == IA64_OPND_AR3)
10083 else if (opnd2 == IA64_OPND_AR3)
10087 if (CURR_SLOT.opnd[rop].X_op == O_register)
10089 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10090 mnemonic = "mov.i";
10091 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10092 mnemonic = "mov.m";
10100 ia64_free_opcode (idesc);
10101 idesc = ia64_find_opcode (mnemonic);
10102 while (idesc != NULL
10103 && (idesc->operands[0] != opnd1
10104 || idesc->operands[1] != opnd2))
10105 idesc = get_next_opcode (idesc);
10109 else if (strcmp (idesc->name, "mov.i") == 0
10110 || strcmp (idesc->name, "mov.m") == 0)
10112 enum ia64_opnd opnd1, opnd2;
10115 opnd1 = idesc->operands[0];
10116 opnd2 = idesc->operands[1];
10117 if (opnd1 == IA64_OPND_AR3)
10119 else if (opnd2 == IA64_OPND_AR3)
10123 if (CURR_SLOT.opnd[rop].X_op == O_register)
10126 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10128 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10130 if (unit != 'a' && unit != idesc->name [4])
10131 as_bad ("AR %d cannot be accessed by %c-unit",
10132 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10138 if (md.qp.X_op == O_register)
10140 qp_regno = md.qp.X_add_number - REG_P;
10141 md.qp.X_op = O_absent;
10144 flags = idesc->flags;
10146 if ((flags & IA64_OPCODE_FIRST) != 0)
10148 /* The alignment frag has to end with a stop bit only if the
10149 next instruction after the alignment directive has to be
10150 the first instruction in an instruction group. */
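/* Illustrative case (assuming "alloc" carries IA64_OPCODE_FIRST, as it must
   be the first instruction in its group): after ".align 32", an
   "alloc r35 = ar.pfs, 2, 3, 1, 0" requires the padding emitted for the
   alignment to end with a stop bit.  */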
10153 while (align_frag->fr_type != rs_align_code)
10155 align_frag = align_frag->fr_next;
10159 /* align_frag can be NULL if there are directives in
10161 if (align_frag && align_frag->fr_next == frag_now)
10162 align_frag->tc_frag_data = 1;
10165 insn_group_break (1, 0, 0);
10169 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10171 as_bad ("`%s' cannot be predicated", idesc->name);
10175 /* Build the instruction. */
10176 CURR_SLOT.qp_regno = qp_regno;
10177 CURR_SLOT.idesc = idesc;
10178 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
10179 dwarf2_where (&CURR_SLOT.debug_line);
10181 /* Add unwind entry, if there is one. */
10182 if (unwind.current_entry)
10184 CURR_SLOT.unwind_record = unwind.current_entry;
10185 unwind.current_entry = NULL;
10188 /* Check for dependency violations. */
10192 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10193 if (++md.num_slots_in_use >= NUM_SLOTS)
10194 emit_one_bundle ();
10196 if ((flags & IA64_OPCODE_LAST) != 0)
10197 insn_group_break (1, 0, 0);
10199 md.last_text_seg = now_seg;
10202 input_line_pointer = saved_input_line_pointer;
10205 /* Called when symbol NAME cannot be found in the symbol table.
10206 Should be used for dynamic valued symbols only. */
10209 md_undefined_symbol (name)
10210 char *name ATTRIBUTE_UNUSED;
10215 /* Called for any expression that can not be recognized. When the
10216 function is called, `input_line_pointer' will point to the start of
10223 enum pseudo_type pseudo_type;
10228 switch (*input_line_pointer)
10231 /* Find what relocation pseudo-function we're dealing with. */
10233 ch = *++input_line_pointer;
10234 for (i = 0; i < NELEMS (pseudo_func); ++i)
10235 if (pseudo_func[i].name && pseudo_func[i].name[0] == ch)
10237 len = strlen (pseudo_func[i].name);
10238 if (strncmp (pseudo_func[i].name + 1,
10239 input_line_pointer + 1, len - 1) == 0
10240 && !is_part_of_name (input_line_pointer[len]))
10242 input_line_pointer += len;
10243 pseudo_type = pseudo_func[i].type;
10247 switch (pseudo_type)
10249 case PSEUDO_FUNC_RELOC:
10250 SKIP_WHITESPACE ();
10251 if (*input_line_pointer != '(')
10253 as_bad ("Expected '('");
10257 ++input_line_pointer;
10259 if (*input_line_pointer++ != ')')
10261 as_bad ("Missing ')'");
10264 if (e->X_op != O_symbol)
10266 if (e->X_op != O_pseudo_fixup)
10268 as_bad ("Not a symbolic expression");
10271 if (i != FUNC_LT_RELATIVE)
10273 as_bad ("Illegal combination of relocation functions");
10276 switch (S_GET_VALUE (e->X_op_symbol))
10278 case FUNC_FPTR_RELATIVE:
10279 i = FUNC_LT_FPTR_RELATIVE; break;
10280 case FUNC_DTP_MODULE:
10281 i = FUNC_LT_DTP_MODULE; break;
10282 case FUNC_DTP_RELATIVE:
10283 i = FUNC_LT_DTP_RELATIVE; break;
10284 case FUNC_TP_RELATIVE:
10285 i = FUNC_LT_TP_RELATIVE; break;
10287 as_bad ("Illegal combination of relocation functions");
10291 /* Make sure gas doesn't get rid of local symbols that are used
10293 e->X_op = O_pseudo_fixup;
10294 e->X_op_symbol = pseudo_func[i].u.sym;
10297 case PSEUDO_FUNC_CONST:
10298 e->X_op = O_constant;
10299 e->X_add_number = pseudo_func[i].u.ival;
10302 case PSEUDO_FUNC_REG:
10303 e->X_op = O_register;
10304 e->X_add_number = pseudo_func[i].u.ival;
10308 name = input_line_pointer - 1;
10310 as_bad ("Unknown pseudo function `%s'", name);
10316 ++input_line_pointer;
10318 if (*input_line_pointer != ']')
10320 as_bad ("Closing bracket misssing");
10325 if (e->X_op != O_register)
10326 as_bad ("Register expected as index");
10328 ++input_line_pointer;
10339 ignore_rest_of_line ();
10342 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10343 a section symbol plus some offset. For relocs involving @fptr()
10344 directives, we don't want such adjustments since we need to have the
10345 original symbol's name in the reloc. */
10347 ia64_fix_adjustable (fix)
10350 /* Prevent all adjustments to global symbols */
10351 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10354 switch (fix->fx_r_type)
10356 case BFD_RELOC_IA64_FPTR64I:
10357 case BFD_RELOC_IA64_FPTR32MSB:
10358 case BFD_RELOC_IA64_FPTR32LSB:
10359 case BFD_RELOC_IA64_FPTR64MSB:
10360 case BFD_RELOC_IA64_FPTR64LSB:
10361 case BFD_RELOC_IA64_LTOFF_FPTR22:
10362 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10372 ia64_force_relocation (fix)
10375 switch (fix->fx_r_type)
10377 case BFD_RELOC_IA64_FPTR64I:
10378 case BFD_RELOC_IA64_FPTR32MSB:
10379 case BFD_RELOC_IA64_FPTR32LSB:
10380 case BFD_RELOC_IA64_FPTR64MSB:
10381 case BFD_RELOC_IA64_FPTR64LSB:
10383 case BFD_RELOC_IA64_LTOFF22:
10384 case BFD_RELOC_IA64_LTOFF64I:
10385 case BFD_RELOC_IA64_LTOFF_FPTR22:
10386 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10387 case BFD_RELOC_IA64_PLTOFF22:
10388 case BFD_RELOC_IA64_PLTOFF64I:
10389 case BFD_RELOC_IA64_PLTOFF64MSB:
10390 case BFD_RELOC_IA64_PLTOFF64LSB:
10392 case BFD_RELOC_IA64_LTOFF22X:
10393 case BFD_RELOC_IA64_LDXMOV:
10400 return generic_force_reloc (fix);
10403 /* Decide from what point a pc-relative relocation is relative to,
10404 relative to the pc-relative fixup. Er, relatively speaking. */
10406 ia64_pcrel_from_section (fix, sec)
10410 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10412 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
10419 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10421 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10425 expr.X_op = O_pseudo_fixup;
10426 expr.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10427 expr.X_add_number = 0;
10428 expr.X_add_symbol = symbol;
10429 emit_expr (&expr, size);
10432 /* This is called whenever some data item (not an instruction) needs a
10433 fixup. We pick the right reloc code depending on the byteorder
10434 currently in effect. */
10436 ia64_cons_fix_new (f, where, nbytes, exp)
10442 bfd_reloc_code_real_type code;
10447 /* There are no relocs for 8 and 16 bit quantities, but we allow
10448 them here since they will work fine as long as the expression
10449 is fully defined at the end of the pass over the source file. */
10450 case 1: code = BFD_RELOC_8; break;
10451 case 2: code = BFD_RELOC_16; break;
10453 if (target_big_endian)
10454 code = BFD_RELOC_IA64_DIR32MSB;
10456 code = BFD_RELOC_IA64_DIR32LSB;
10460 /* In 32-bit mode, data8 could mean function descriptors too. */
10461 if (exp->X_op == O_pseudo_fixup
10462 && exp->X_op_symbol
10463 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
10464 && !(md.flags & EF_IA_64_ABI64))
10466 if (target_big_endian)
10467 code = BFD_RELOC_IA64_IPLTMSB;
10469 code = BFD_RELOC_IA64_IPLTLSB;
10470 exp->X_op = O_symbol;
10475 if (target_big_endian)
10476 code = BFD_RELOC_IA64_DIR64MSB;
10478 code = BFD_RELOC_IA64_DIR64LSB;
10483 if (exp->X_op == O_pseudo_fixup
10484 && exp->X_op_symbol
10485 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
10487 if (target_big_endian)
10488 code = BFD_RELOC_IA64_IPLTMSB;
10490 code = BFD_RELOC_IA64_IPLTLSB;
10491 exp->X_op = O_symbol;
10497 as_bad ("Unsupported fixup size %d", nbytes);
10498 ignore_rest_of_line ();
10502 if (exp->X_op == O_pseudo_fixup)
10504 exp->X_op = O_symbol;
10505 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
10506 /* ??? If code unchanged, unsupported. */
10509 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
10510 /* We need to store the byte order in effect in case we're going
10511 to fix an 8 or 16 bit relocation (for which there are no real
10512 relocs available). See md_apply_fix3(). */
10513 fix->tc_fix_data.bigendian = target_big_endian;
10516 /* Return the actual relocation we wish to associate with the pseudo
10517 reloc described by SYM and R_TYPE. SYM should be one of the
10518 symbols in the pseudo_func array, or NULL. */
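/* e.g. (illustrative): for "data4 @gprel(sym)" the incoming code is
   BFD_RELOC_IA64_DIR32LSB (on a little-endian target) and the
   FUNC_GP_RELATIVE case below rewrites it to BFD_RELOC_IA64_GPREL32LSB.  */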
10520 static bfd_reloc_code_real_type
10521 ia64_gen_real_reloc_type (sym, r_type)
10522 struct symbol *sym;
10523 bfd_reloc_code_real_type r_type;
10525 bfd_reloc_code_real_type new = 0;
10532 switch (S_GET_VALUE (sym))
10534 case FUNC_FPTR_RELATIVE:
10537 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
10538 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
10539 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
10540 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
10541 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
10546 case FUNC_GP_RELATIVE:
10549 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
10550 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
10551 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
10552 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
10553 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
10554 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
10559 case FUNC_LT_RELATIVE:
10562 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
10563 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
10568 case FUNC_LT_RELATIVE_X:
10571 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22X; break;
10576 case FUNC_PC_RELATIVE:
10579 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
10580 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
10581 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
10582 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
10583 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
10584 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
10589 case FUNC_PLT_RELATIVE:
10592 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
10593 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
10594 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
10595 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
10600 case FUNC_SEC_RELATIVE:
10603 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
10604 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
10605 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
10606 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
10611 case FUNC_SEG_RELATIVE:
10614 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
10615 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
10616 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
10617 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
10622 case FUNC_LTV_RELATIVE:
10625 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
10626 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
10627 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
10628 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
10633 case FUNC_LT_FPTR_RELATIVE:
10636 case BFD_RELOC_IA64_IMM22:
10637 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
10638 case BFD_RELOC_IA64_IMM64:
10639 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
10645 case FUNC_TP_RELATIVE:
10648 case BFD_RELOC_IA64_IMM14:
10649 new = BFD_RELOC_IA64_TPREL14; break;
10650 case BFD_RELOC_IA64_IMM22:
10651 new = BFD_RELOC_IA64_TPREL22; break;
10652 case BFD_RELOC_IA64_IMM64:
10653 new = BFD_RELOC_IA64_TPREL64I; break;
10659 case FUNC_LT_TP_RELATIVE:
10662 case BFD_RELOC_IA64_IMM22:
10663 new = BFD_RELOC_IA64_LTOFF_TPREL22; break;
10669 case FUNC_LT_DTP_MODULE:
10672 case BFD_RELOC_IA64_IMM22:
10673 new = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
10679 case FUNC_DTP_RELATIVE:
10682 case BFD_RELOC_IA64_DIR64MSB:
10683 new = BFD_RELOC_IA64_DTPREL64MSB; break;
10684 case BFD_RELOC_IA64_DIR64LSB:
10685 new = BFD_RELOC_IA64_DTPREL64LSB; break;
10686 case BFD_RELOC_IA64_IMM14:
10687 new = BFD_RELOC_IA64_DTPREL14; break;
10688 case BFD_RELOC_IA64_IMM22:
10689 new = BFD_RELOC_IA64_DTPREL22; break;
10690 case BFD_RELOC_IA64_IMM64:
10691 new = BFD_RELOC_IA64_DTPREL64I; break;
10697 case FUNC_LT_DTP_RELATIVE:
10700 case BFD_RELOC_IA64_IMM22:
10701 new = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
10707 case FUNC_IPLT_RELOC:
10714 /* Hmmmm. Should this ever occur? */
10721 /* Here is where we generate the appropriate reloc for pseudo relocation
10724 ia64_validate_fix (fix)
10727 switch (fix->fx_r_type)
10729 case BFD_RELOC_IA64_FPTR64I:
10730 case BFD_RELOC_IA64_FPTR32MSB:
10731 case BFD_RELOC_IA64_FPTR64LSB:
10732 case BFD_RELOC_IA64_LTOFF_FPTR22:
10733 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10734 if (fix->fx_offset != 0)
10735 as_bad_where (fix->fx_file, fix->fx_line,
10736 "No addend allowed in @fptr() relocation");
10744 fix_insn (fix, odesc, value)
10746 const struct ia64_operand *odesc;
10749 bfd_vma insn[3], t0, t1, control_bits;
10754 slot = fix->fx_where & 0x3;
10755 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
10757 /* Bundles are always in little-endian byte order */
10758 t0 = bfd_getl64 (fixpos);
10759 t1 = bfd_getl64 (fixpos + 8);
10760 control_bits = t0 & 0x1f;
10761 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
10762 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
10763 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
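/* The special cases below reassemble long immediates that span slots 1 and 2
   of the bundle (e.g. movl's 64-bit immediate); everything else is inserted
   into its single slot via the operand's insert handler.  */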
10766 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
10768 insn[1] = (value >> 22) & 0x1ffffffffffLL;
10769 insn[2] |= (((value & 0x7f) << 13)
10770 | (((value >> 7) & 0x1ff) << 27)
10771 | (((value >> 16) & 0x1f) << 22)
10772 | (((value >> 21) & 0x1) << 21)
10773 | (((value >> 63) & 0x1) << 36));
10775 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
10777 if (value & ~0x3fffffffffffffffULL)
10778 err = "integer operand out of range";
10779 insn[1] = (value >> 21) & 0x1ffffffffffLL;
10780 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
10782 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
10785 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
10786 insn[2] |= ((((value >> 59) & 0x1) << 36)
10787 | (((value >> 0) & 0xfffff) << 13));
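      /* Descriptive note (not in the original source): IMMU64 (movl),
	 IMMU62 (the 62-bit X-unit immediate) and TGT64 (brl) are operands
	 of X-unit instructions in an MLX bundle.  Their immediates do not
	 fit in a single 41-bit slot, so slot 1 is overwritten wholesale
	 with the high-order bits and the remaining fields are merged into
	 slot 2.  All other operands go through the operand's own insert
	 routine on just one slot, below.  */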
10790 err = (*odesc->insert) (odesc, value, insn + slot);
10793 as_bad_where (fix->fx_file, fix->fx_line, err);
10795 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
10796 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
10797 number_to_chars_littleendian (fixpos + 0, t0, 8);
10798 number_to_chars_littleendian (fixpos + 8, t1, 8);
10801 /* Attempt to simplify or even eliminate a fixup. The return value is
10802 ignored; perhaps it was once meaningful, but now it is historical.
10803 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
10805 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
10809 md_apply_fix3 (fix, valP, seg)
10812 segT seg ATTRIBUTE_UNUSED;
10815 valueT value = *valP;
10817 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
10821 switch (fix->fx_r_type)
10823 case BFD_RELOC_IA64_DIR32MSB:
10824 fix->fx_r_type = BFD_RELOC_IA64_PCREL32MSB;
10827 case BFD_RELOC_IA64_DIR32LSB:
10828 fix->fx_r_type = BFD_RELOC_IA64_PCREL32LSB;
10831 case BFD_RELOC_IA64_DIR64MSB:
10832 fix->fx_r_type = BFD_RELOC_IA64_PCREL64MSB;
10835 case BFD_RELOC_IA64_DIR64LSB:
10836 fix->fx_r_type = BFD_RELOC_IA64_PCREL64LSB;
10845 switch (fix->fx_r_type)
10847 case BFD_RELOC_UNUSED:
10848 /* This must be a TAG13 or TAG13b operand. There are no external
10849 relocs defined for them, so we must give an error. */
10850 as_bad_where (fix->fx_file, fix->fx_line,
10851 "%s must have a constant value",
10852 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
10856 case BFD_RELOC_IA64_TPREL14:
10857 case BFD_RELOC_IA64_TPREL22:
10858 case BFD_RELOC_IA64_TPREL64I:
10859 case BFD_RELOC_IA64_LTOFF_TPREL22:
10860 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
10861 case BFD_RELOC_IA64_DTPREL14:
10862 case BFD_RELOC_IA64_DTPREL22:
10863 case BFD_RELOC_IA64_DTPREL64I:
10864 case BFD_RELOC_IA64_LTOFF_DTPREL22:
10865 S_SET_THREAD_LOCAL (fix->fx_addsy);
10872 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
10874 if (fix->tc_fix_data.bigendian)
10875 number_to_chars_bigendian (fixpos, value, fix->fx_size);
10877 number_to_chars_littleendian (fixpos, value, fix->fx_size);
10882 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
10887 /* Generate the BFD reloc to be stuck in the object file from the
10888 fixup used internally in the assembler. */
10891 tc_gen_reloc (sec, fixp)
10892 asection *sec ATTRIBUTE_UNUSED;
10897 reloc = xmalloc (sizeof (*reloc));
10898 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
10899 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10900 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
10901 reloc->addend = fixp->fx_offset;
10902 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
10906 as_bad_where (fixp->fx_file, fixp->fx_line,
10907 "Cannot represent %s relocation in object file",
10908 bfd_get_reloc_code_name (fixp->fx_r_type));
10913 /* Turn a string in input_line_pointer into a floating point constant
10914 of type TYPE, and store the appropriate bytes in *LIT. The number
10915 of LITTLENUMS emitted is stored in *SIZE. An error message is
10916 returned, or NULL on OK. */
10918 #define MAX_LITTLENUMS 5
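/* Size note (descriptive, not in the original source): a LITTLENUM is two
   bytes, so MAX_LITTLENUMS of 5 covers the 10 significant bytes of an
   80-bit extended value.  For the extended types the code below zero-pads
   those 10 bytes out to a full 16-byte slot and reports *SIZE as 16;
   every other type reports prec * sizeof (LITTLENUM_TYPE).  */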
10921 md_atof (type, lit, size)
10926 LITTLENUM_TYPE words[MAX_LITTLENUMS];
10956 return "Bad call to MD_ATOF()";
10958 t = atof_ieee (input_line_pointer, type, words);
10960 input_line_pointer = t;
10962 (*ia64_float_to_chars) (lit, words, prec);
10966 /* It is a 10-byte floating-point value followed by 6 bytes of padding. */
10967 memset (&lit [10], 0, 6);
10968 *size = 8 * sizeof (LITTLENUM_TYPE);
10971 *size = prec * sizeof (LITTLENUM_TYPE);
10976 /* Handle ia64 specific semantics of the align directive. */
10979 ia64_md_do_align (n, fill, len, max)
10980 int n ATTRIBUTE_UNUSED;
10981 const char *fill ATTRIBUTE_UNUSED;
10982 int len ATTRIBUTE_UNUSED;
10983 int max ATTRIBUTE_UNUSED;
10985 if (subseg_text_p (now_seg))
10986 ia64_flush_insns ();
10989 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
10990 of an rs_align_code fragment. */
10993 ia64_handle_align (fragp)
10996 /* Use an MFI bundle of nops with no stop bits. */
10997 static const unsigned char le_nop[]
10998 = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
10999 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
11000 static const unsigned char le_nop_stop[]
11001 = { 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
11002 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
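  /* Descriptive note (not in the original source): the two byte patterns
     above differ only in their first byte, whose low five bits hold the
     bundle template: 0x0c is the MFI template without a trailing stop bit,
     0x0d is the same MFI template with a stop bit after the bundle.  Each
     of the three slots holds the matching nop (nop.m / nop.f / nop.i).  */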
11006 const unsigned char *nop;
11008 if (fragp->fr_type != rs_align_code)
11011 /* Check if this frag has to end with a stop bit. */
11012 nop = fragp->tc_frag_data ? le_nop_stop : le_nop;
11014 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11015 p = fragp->fr_literal + fragp->fr_fix;
11017 /* If no padding is needed, check whether we need a stop bit. */
11018 if (!bytes && fragp->tc_frag_data)
11020 if (fragp->fr_fix < 16)
11022 /* FIXME: It won't work with
11024 alloc r32=ar.pfs,1,2,4,0
11028 as_bad_where (fragp->fr_file, fragp->fr_line,
11029 _("Can't add stop bit to mark end of instruction group"));
11032 /* Bundles are always in little-endian byte order. Make sure
11033 the previous bundle has the stop bit. */
11037 /* Make sure we are on a 16-byte boundary, in case someone has been
11038 putting data into a text section. */
11041 int fix = bytes & 15;
11042 memset (p, 0, fix);
11045 fragp->fr_fix += fix;
11048 /* Instruction bundles are always little-endian. */
11049 memcpy (p, nop, 16);
11050 fragp->fr_var = 16;
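  /* Descriptive note (not in the original source): setting fr_var to 16
     tells the generic alignment code to repeat the 16-byte nop bundle just
     copied to P for however much padding this frag still needs; any
     sub-bundle remainder was already zero-filled above.  */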
11054 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11059 number_to_chars_bigendian (lit, (long) (*words++),
11060 sizeof (LITTLENUM_TYPE));
11061 lit += sizeof (LITTLENUM_TYPE);
11066 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11071 number_to_chars_littleendian (lit, (long) (words[prec]),
11072 sizeof (LITTLENUM_TYPE));
11073 lit += sizeof (LITTLENUM_TYPE);
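/* Descriptive note (not in the original source): atof_ieee produces the
   LITTLENUMs most-significant first.  The big-endian writer above emits
   them in that order, while this little-endian writer walks the array
   backwards so the least-significant LITTLENUM lands at the lowest
   address, giving a fully little-endian in-memory image.  */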
11078 ia64_elf_section_change_hook (void)
11080 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11081 && elf_linked_to_section (now_seg) == NULL)
11082 elf_linked_to_section (now_seg) = text_section;
11083 dot_byteorder (-1);
11086 /* Check if a label should be made global. */
11088 ia64_check_label (symbolS *label)
11090 if (*input_line_pointer == ':')
11092 S_SET_EXTERNAL (label);
11093 input_line_pointer++;
11097 /* Used to remember where .alias and .secalias directives are seen. We
11098 will rename symbol and section names when we are about to output
11099 the relocatable file. */
11102 char *file; /* The file where the directive is seen. */
11103 unsigned int line; /* The line number the directive is at. */
11104 const char *name; /* The original name of the symbol. */
11107 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11108 .secalias. Otherwise, it is .alias. */
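/* Illustrative usage (hypothetical names, not from the original source):

       .alias     foo, "foo#ext"         write symbol foo out as foo#ext
       .secalias  .mysect, ".renamed"    write section .mysect out as .renamed

   The first operand is an ordinary symbol or section name, the second a
   quoted string; the actual renaming is deferred until the object file is
   written out (see ia64_adjust_symtab and ia64_frob_file below).  */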
11110 dot_alias (int section)
11112 char *name, *alias;
11116 const char *error_string;
11119 struct hash_control *ahash, *nhash;
11122 name = input_line_pointer;
11123 delim = get_symbol_end ();
11124 end_name = input_line_pointer;
11127 if (name == end_name)
11129 as_bad (_("expected symbol name"));
11130 discard_rest_of_line ();
11134 SKIP_WHITESPACE ();
11136 if (*input_line_pointer != ',')
11139 as_bad (_("expected comma after \"%s\""), name);
11141 ignore_rest_of_line ();
11145 input_line_pointer++;
11148 /* We call demand_copy_C_string to check that the alias string is valid.
11149 There should be a closing `"' and no `\0' in the string. */
11150 alias = demand_copy_C_string (&len);
11153 ignore_rest_of_line ();
11157 /* Make a copy of the name string. */
11158 len = strlen (name) + 1;
11159 obstack_grow (&notes, name, len);
11160 name = obstack_finish (&notes);
11165 ahash = secalias_hash;
11166 nhash = secalias_name_hash;
11171 ahash = alias_hash;
11172 nhash = alias_name_hash;
11175 /* Check if the alias has been used before. */
11176 h = (struct alias *) hash_find (ahash, alias);
11179 if (strcmp (h->name, name))
11180 as_bad (_("`%s' is already the alias of %s `%s'"),
11181 alias, kind, h->name);
11185 /* Check if the name already has an alias. */
11186 a = (const char *) hash_find (nhash, name);
11189 if (strcmp (a, alias))
11190 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11194 h = (struct alias *) xmalloc (sizeof (struct alias));
11195 as_where (&h->file, &h->line);
11198 error_string = hash_jam (ahash, alias, (PTR) h);
11201 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11202 alias, kind, error_string);
11206 error_string = hash_jam (nhash, name, (PTR) alias);
11209 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11210 alias, kind, error_string);
11212 obstack_free (&notes, name);
11213 obstack_free (&notes, alias);
11216 demand_empty_rest_of_line ();
11219 /* Rename the original symbol to its alias. */
11221 do_alias (const char *alias, PTR value)
11223 struct alias *h = (struct alias *) value;
11224 symbolS *sym = symbol_find (h->name);
11227 as_warn_where (h->file, h->line,
11228 _("symbol `%s' aliased to `%s' is not used"),
11231 S_SET_NAME (sym, (char *) alias);
11234 /* Called from write_object_file. */
11236 ia64_adjust_symtab (void)
11238 hash_traverse (alias_hash, do_alias);
11241 /* Rename the original section to its alias. */
11243 do_secalias (const char *alias, PTR value)
11245 struct alias *h = (struct alias *) value;
11246 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11249 as_warn_where (h->file, h->line,
11250 _("section `%s' aliased to `%s' is not used"),
11256 /* Called from write_object_file. */
11258 ia64_frob_file (void)
11260 hash_traverse (secalias_hash, do_secalias);