1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
3 Free Software Foundation, Inc.
6 This file is part of GAS, the GNU Assembler.
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
35 - labels are wrong if automatic alignment is introduced
36 (e.g., check out the second real10 definition in test-data.s)
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
46 #include "safe-ctype.h"
47 #include "dwarf2dbg.h"
50 #include "opcode/ia64.h"
58 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
59 #define MIN(a,b) ((a) < (b) ? (a) : (b))
62 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
63 #define CURR_SLOT md.slot[md.curr_slot]
65 #define O_pseudo_fixup (O_max + 1)
69 /* IA-64 ABI section pseudo-ops. */
70 SPECIAL_SECTION_BSS = 0,
72 SPECIAL_SECTION_SDATA,
73 SPECIAL_SECTION_RODATA,
74 SPECIAL_SECTION_COMMENT,
75 SPECIAL_SECTION_UNWIND,
76 SPECIAL_SECTION_UNWIND_INFO,
77 /* HPUX specific section pseudo-ops. */
78 SPECIAL_SECTION_INIT_ARRAY,
79 SPECIAL_SECTION_FINI_ARRAY,
96 FUNC_LT_FPTR_RELATIVE,
106 REG_FR = (REG_GR + 128),
107 REG_AR = (REG_FR + 128),
108 REG_CR = (REG_AR + 128),
109 REG_P = (REG_CR + 128),
110 REG_BR = (REG_P + 64),
111 REG_IP = (REG_BR + 8),
118 /* The following are pseudo-registers for use by gas only. */
130 /* The following pseudo-registers are used for unwind directives only: */
138 DYNREG_GR = 0, /* dynamic general purpose register */
139 DYNREG_FR, /* dynamic floating point register */
140 DYNREG_PR, /* dynamic predicate register */
144 enum operand_match_result
147 OPERAND_OUT_OF_RANGE,
151 /* On the ia64, we can't know the address of a text label until the
152 instructions are packed into a bundle. To handle this, we keep
153 track of the list of labels that appear in front of each
157 struct label_fix *next;
161 /* This is the endianness of the current section. */
162 extern int target_big_endian;
164 /* This is the default endianness. */
165 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
167 void (*ia64_number_to_chars) PARAMS ((char *, valueT, int));
169 static void ia64_float_to_chars_bigendian
170 PARAMS ((char *, LITTLENUM_TYPE *, int));
171 static void ia64_float_to_chars_littleendian
172 PARAMS ((char *, LITTLENUM_TYPE *, int));
173 static void (*ia64_float_to_chars)
174 PARAMS ((char *, LITTLENUM_TYPE *, int));
176 static struct hash_control *alias_hash;
177 static struct hash_control *alias_name_hash;
178 static struct hash_control *secalias_hash;
179 static struct hash_control *secalias_name_hash;
181 /* List of chars besides those in app.c:symbol_chars that can start an
182 operand. Used to prevent the scrubber from eating vital white-space. */
183 const char ia64_symbol_chars[] = "@?";
185 /* Characters which always start a comment. */
186 const char comment_chars[] = "";
188 /* Characters which start a comment at the beginning of a line. */
189 const char line_comment_chars[] = "#";
191 /* Characters which may be used to separate multiple commands on a
193 const char line_separator_chars[] = ";";
195 /* Characters which are used to indicate an exponent in a floating
197 const char EXP_CHARS[] = "eE";
199 /* Characters which mean that a number is a floating point constant,
201 const char FLT_CHARS[] = "rRsSfFdDxXpP";
203 /* ia64-specific option processing: */
205 const char *md_shortopts = "m:N:x::";
207 struct option md_longopts[] =
209 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
210 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
211 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
212 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
215 size_t md_longopts_size = sizeof (md_longopts);
219 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
220 struct hash_control *reg_hash; /* register name hash table */
221 struct hash_control *dynreg_hash; /* dynamic register hash table */
222 struct hash_control *const_hash; /* constant hash table */
223 struct hash_control *entry_hash; /* code entry hint hash table */
225 symbolS *regsym[REG_NUM];
227 /* If X_op != O_absent, the register name for the instruction's
228 qualifying predicate. If none is given, p0 is assumed for
229 instructions that can be predicated. */
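/* For example, in "(p7) add r14 = r2, r3" the qualifying predicate is p7;
   a plain "add r14 = r2, r3" names no predicate and is treated as if
   qualified by p0, which is always true.  */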
232 /* Optimize for which CPU. */
239 /* What to do when hint.b is used. */
251 explicit_mode : 1, /* which mode we're in */
252 default_explicit_mode : 1, /* which mode is the default */
253 mode_explicitly_set : 1, /* was the current mode explicitly set? */
255 keep_pending_output : 1;
257 /* What to do when something is wrong with unwind directives. */
260 unwind_check_warning,
264 /* Each bundle consists of up to three instructions. We keep
265 track of the four most recent instructions so we can correctly set
266 the end_of_insn_group for the last instruction in a bundle. */
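/* Illustration: the PREV_SLOT and CURR_SLOT macros above index this array
   as a ring buffer modulo NUM_SLOTS, so with the four slots described
   here and curr_slot == 0, PREV_SLOT refers to md.slot[3].  */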
268 int num_slots_in_use;
272 end_of_insn_group : 1,
273 manual_bundling_on : 1,
274 manual_bundling_off : 1,
275 loc_directive_seen : 1;
276 signed char user_template; /* user-selected template, if any */
277 unsigned char qp_regno; /* qualifying predicate */
278 /* This duplicates a good fraction of "struct fix" but we
279 can't use a "struct fix" instead since we can't call
280 fix_new_exp() until we know the address of the instruction. */
284 bfd_reloc_code_real_type code;
285 enum ia64_opnd opnd; /* type of operand in need of fix */
286 unsigned int is_pcrel : 1; /* is operand pc-relative? */
287 expressionS expr; /* the value to be inserted */
289 fixup[2]; /* at most two fixups per insn */
290 struct ia64_opcode *idesc;
291 struct label_fix *label_fixups;
292 struct label_fix *tag_fixups;
293 struct unw_rec_list *unwind_record; /* Unwind directive. */
296 unsigned int src_line;
297 struct dwarf2_line_info debug_line;
305 struct dynreg *next; /* next dynamic register */
307 unsigned short base; /* the base register number */
308 unsigned short num_regs; /* # of registers in this set */
310 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
312 flagword flags; /* ELF-header flags */
315 unsigned hint:1; /* is this hint currently valid? */
316 bfd_vma offset; /* mem.offset offset */
317 bfd_vma base; /* mem.offset base */
320 int path; /* number of alt. entry points seen */
321 const char **entry_labels; /* labels of all alternate paths in
322 the current DV-checking block. */
323 int maxpaths; /* size currently allocated for
326 int pointer_size; /* size in bytes of a pointer */
327 int pointer_size_shift; /* shift size of a pointer for alignment */
331 /* These are not const, because they are modified to MMI for non-itanium1
333 /* MFI bundle of nops. */
334 static unsigned char le_nop[16] =
336 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
337 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
339 /* MFI bundle of nops with stop-bit. */
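/* This differs from le_nop above only in the first byte (0x0d vs. 0x0c):
   the low bits of a bundle's first byte hold the template field, and the
   odd template value adds a stop at the end of the bundle.  */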
340 static unsigned char le_nop_stop[16] =
342 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
343 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
346 /* application registers: */
352 #define AR_BSPSTORE 18
367 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
368 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
369 {"ar.rsc", 16}, {"ar.bsp", 17},
370 {"ar.bspstore", 18}, {"ar.rnat", 19},
371 {"ar.fcr", 21}, {"ar.eflag", 24},
372 {"ar.csd", 25}, {"ar.ssd", 26},
373 {"ar.cflg", 27}, {"ar.fsr", 28},
374 {"ar.fir", 29}, {"ar.fdr", 30},
375 {"ar.ccv", 32}, {"ar.unat", 36},
376 {"ar.fpsr", 40}, {"ar.itc", 44},
377 {"ar.pfs", 64}, {"ar.lc", 65},
398 /* control registers: */
440 static const struct const_desc
447 /* PSR constant masks: */
450 {"psr.be", ((valueT) 1) << 1},
451 {"psr.up", ((valueT) 1) << 2},
452 {"psr.ac", ((valueT) 1) << 3},
453 {"psr.mfl", ((valueT) 1) << 4},
454 {"psr.mfh", ((valueT) 1) << 5},
456 {"psr.ic", ((valueT) 1) << 13},
457 {"psr.i", ((valueT) 1) << 14},
458 {"psr.pk", ((valueT) 1) << 15},
460 {"psr.dt", ((valueT) 1) << 17},
461 {"psr.dfl", ((valueT) 1) << 18},
462 {"psr.dfh", ((valueT) 1) << 19},
463 {"psr.sp", ((valueT) 1) << 20},
464 {"psr.pp", ((valueT) 1) << 21},
465 {"psr.di", ((valueT) 1) << 22},
466 {"psr.si", ((valueT) 1) << 23},
467 {"psr.db", ((valueT) 1) << 24},
468 {"psr.lp", ((valueT) 1) << 25},
469 {"psr.tb", ((valueT) 1) << 26},
470 {"psr.rt", ((valueT) 1) << 27},
471 /* 28-31: reserved */
472 /* 32-33: cpl (current privilege level) */
473 {"psr.is", ((valueT) 1) << 34},
474 {"psr.mc", ((valueT) 1) << 35},
475 {"psr.it", ((valueT) 1) << 36},
476 {"psr.id", ((valueT) 1) << 37},
477 {"psr.da", ((valueT) 1) << 38},
478 {"psr.dd", ((valueT) 1) << 39},
479 {"psr.ss", ((valueT) 1) << 40},
480 /* 41-42: ri (restart instruction) */
481 {"psr.ed", ((valueT) 1) << 43},
482 {"psr.bn", ((valueT) 1) << 44},
485 /* indirect register-sets/memory: */
494 { "CPUID", IND_CPUID },
495 { "cpuid", IND_CPUID },
507 /* Pseudo functions used to indicate relocation types (these functions
508 start with an at sign (@)). */
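/* For example, an operand written as @gprel(sym) or @ltoff(sym) selects
   the corresponding relocation pseudo-function from the table below.  */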
530 /* reloc pseudo functions (these must come first!): */
531 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
532 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
533 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
534 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
535 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
536 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
537 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
538 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
539 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
540 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
541 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
542 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
543 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
544 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
545 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
546 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
547 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
549 /* mbtype4 constants: */
550 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
551 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
552 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
553 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
554 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
556 /* fclass constants: */
557 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
558 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
559 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
560 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
561 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
562 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
563 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
564 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
565 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
567 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
569 /* hint constants: */
570 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
572 /* unwind-related constants: */
573 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
574 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
575 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
576 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_LINUX } },
577 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
578 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
579 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
581 /* unwind-related registers: */
582 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
585 /* 41-bit nop opcodes (one per unit): */
586 static const bfd_vma nop[IA64_NUM_UNITS] =
588 0x0000000000LL, /* NIL => break 0 */
589 0x0008000000LL, /* I-unit nop */
590 0x0008000000LL, /* M-unit nop */
591 0x4000000000LL, /* B-unit nop */
592 0x0008000000LL, /* F-unit nop */
593 0x0008000000LL, /* L-"unit" nop */
594 0x0008000000LL, /* X-unit nop */
597 /* Can't be `const' as it's passed to input routines (which have the
598 habit of setting temporary sentinels). */
599 static char special_section_name[][20] =
601 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
602 {".IA_64.unwind"}, {".IA_64.unwind_info"},
603 {".init_array"}, {".fini_array"}
606 /* The best template for a particular sequence of up to three
608 #define N IA64_NUM_TYPES
609 static unsigned char best_template[N][N][N];
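/* The indices are IA64_TYPE_* instruction types; e.g.,
   best_template[IA64_TYPE_M][IA64_TYPE_F][IA64_TYPE_I] would hold the
   template preferred for an M-, F-, I-unit sequence (such as MFI).  */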
612 /* Resource dependencies currently in effect */
614 int depind; /* dependency index */
615 const struct ia64_dependency *dependency; /* actual dependency */
616 unsigned specific:1, /* is this a specific bit/regno? */
617 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
618 int index; /* specific regno/bit within dependency */
619 int note; /* optional qualifying note (0 if none) */
623 int insn_srlz; /* current insn serialization state */
624 int data_srlz; /* current data serialization state */
625 int qp_regno; /* qualifying predicate for this usage */
626 char *file; /* what file marked this dependency */
627 unsigned int line; /* what line marked this dependency */
628 struct mem_offset mem_offset; /* optional memory offset hint */
629 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
630 int path; /* corresponding code entry index */
632 static int regdepslen = 0;
633 static int regdepstotlen = 0;
634 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
635 static const char *dv_sem[] = { "none", "implied", "impliedf",
636 "data", "instr", "specific", "stop", "other" };
637 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
639 /* Current state of PR mutexation */
640 static struct qpmutex {
643 } *qp_mutexes = NULL; /* QP mutex bitmasks */
644 static int qp_mutexeslen = 0;
645 static int qp_mutexestotlen = 0;
646 static valueT qp_safe_across_calls = 0;
648 /* Current state of PR implications */
649 static struct qp_imply {
652 unsigned p2_branched:1;
654 } *qp_implies = NULL;
655 static int qp_implieslen = 0;
656 static int qp_impliestotlen = 0;
658 /* Keep track of static GR values so that indirect register usage can
659 sometimes be tracked. */
670 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
676 /* Remember the alignment frag. */
677 static fragS *align_frag;
679 /* These are the routines required to output the various types of
682 /* A slot_number is a frag address plus the slot index (0-2). We use the
683 frag address here so that if there is a section switch in the middle of
684 a function, then instructions emitted to a different section are not
685 counted. Since there may be more than one frag for a function, this
686 means we also need to keep track of which frag this address belongs to
687 so we can compute inter-frag distances. This also nicely solves the
688 problem with nops emitted for align directives, which can't easily be
689 counted, but can easily be derived from frag sizes. */
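/* Concretely, since each bundle is 16 bytes, (slot_number >> 4) counts
   bundles within the frag's buffer and (slot_number & 0x3) recovers the
   0-2 slot index; slot_index () below relies on this encoding.  */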
691 typedef struct unw_rec_list {
693 unsigned long slot_number;
695 unsigned long next_slot_number;
696 fragS *next_slot_frag;
697 struct unw_rec_list *next;
700 #define SLOT_NUM_NOT_SET (unsigned)-1
702 /* Linked list of saved prologue counts. A very poor
703 implementation of a map from label numbers to prologue counts. */
704 typedef struct label_prologue_count
706 struct label_prologue_count *next;
707 unsigned long label_number;
708 unsigned int prologue_count;
709 } label_prologue_count;
713 /* Maintain a list of unwind entries for the current function. */
717 /* Any unwind entries that should be attached to the current slot
718 that an insn is being constructed for. */
719 unw_rec_list *current_entry;
721 /* These are used to create the unwind table entry for this function. */
723 symbolS *info; /* pointer to unwind info */
724 symbolS *personality_routine;
726 subsegT saved_text_subseg;
727 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
729 /* TRUE if processing unwind directives in a prologue region. */
730 unsigned int prologue : 1;
731 unsigned int prologue_mask : 4;
732 unsigned int body : 1;
733 unsigned int insn : 1;
734 unsigned int prologue_count; /* number of .prologues seen so far */
735 /* Prologue counts at previous .label_state directives. */
736 struct label_prologue_count * saved_prologue_counts;
739 /* The input value is a negated offset from psp, and specifies an address
740 psp - offset. The encoded value E is defined so that the address is
741 psp + 16 - (4 * E); thus we must add 16 and divide by 4 to get E. */
743 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
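/* Worked example: an address 32 bytes below psp arrives as OFFSET == 32
   and encodes as (32 + 16) / 4 == 12; decoding gives psp + 16 - 4 * 12
   == psp - 32, the original location.  */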
745 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
747 /* Forward declarations: */
748 static void set_section PARAMS ((char *name));
749 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
750 unsigned int, unsigned int));
751 static void dot_align (int);
752 static void dot_radix PARAMS ((int));
753 static void dot_special_section PARAMS ((int));
754 static void dot_proc PARAMS ((int));
755 static void dot_fframe PARAMS ((int));
756 static void dot_vframe PARAMS ((int));
757 static void dot_vframesp PARAMS ((int));
758 static void dot_vframepsp PARAMS ((int));
759 static void dot_save PARAMS ((int));
760 static void dot_restore PARAMS ((int));
761 static void dot_restorereg PARAMS ((int));
762 static void dot_restorereg_p PARAMS ((int));
763 static void dot_handlerdata PARAMS ((int));
764 static void dot_unwentry PARAMS ((int));
765 static void dot_altrp PARAMS ((int));
766 static void dot_savemem PARAMS ((int));
767 static void dot_saveg PARAMS ((int));
768 static void dot_savef PARAMS ((int));
769 static void dot_saveb PARAMS ((int));
770 static void dot_savegf PARAMS ((int));
771 static void dot_spill PARAMS ((int));
772 static void dot_spillreg PARAMS ((int));
773 static void dot_spillmem PARAMS ((int));
774 static void dot_spillreg_p PARAMS ((int));
775 static void dot_spillmem_p PARAMS ((int));
776 static void dot_label_state PARAMS ((int));
777 static void dot_copy_state PARAMS ((int));
778 static void dot_unwabi PARAMS ((int));
779 static void dot_personality PARAMS ((int));
780 static void dot_body PARAMS ((int));
781 static void dot_prologue PARAMS ((int));
782 static void dot_endp PARAMS ((int));
783 static void dot_template PARAMS ((int));
784 static void dot_regstk PARAMS ((int));
785 static void dot_rot PARAMS ((int));
786 static void dot_byteorder PARAMS ((int));
787 static void dot_psr PARAMS ((int));
788 static void dot_alias PARAMS ((int));
789 static void dot_ln PARAMS ((int));
790 static void cross_section PARAMS ((int ref, void (*cons) PARAMS((int)), int ua));
791 static void dot_xdata PARAMS ((int));
792 static void stmt_float_cons PARAMS ((int));
793 static void stmt_cons_ua PARAMS ((int));
794 static void dot_xfloat_cons PARAMS ((int));
795 static void dot_xstringer PARAMS ((int));
796 static void dot_xdata_ua PARAMS ((int));
797 static void dot_xfloat_cons_ua PARAMS ((int));
798 static void print_prmask PARAMS ((valueT mask));
799 static void dot_pred_rel PARAMS ((int));
800 static void dot_reg_val PARAMS ((int));
801 static void dot_serialize PARAMS ((int));
802 static void dot_dv_mode PARAMS ((int));
803 static void dot_entry PARAMS ((int));
804 static void dot_mem_offset PARAMS ((int));
805 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
806 static symbolS *declare_register PARAMS ((const char *name, int regnum));
807 static void declare_register_set PARAMS ((const char *, int, int));
808 static unsigned int operand_width PARAMS ((enum ia64_opnd));
809 static enum operand_match_result operand_match PARAMS ((const struct ia64_opcode *idesc,
812 static int parse_operand PARAMS ((expressionS *e));
813 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
814 static void build_insn PARAMS ((struct slot *, bfd_vma *));
815 static void emit_one_bundle PARAMS ((void));
816 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
817 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
818 bfd_reloc_code_real_type r_type));
819 static void insn_group_break PARAMS ((int, int, int));
820 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
821 struct rsrc *, int depind, int path));
822 static void add_qp_mutex PARAMS((valueT mask));
823 static void add_qp_imply PARAMS((int p1, int p2));
824 static void clear_qp_branch_flag PARAMS((valueT mask));
825 static void clear_qp_mutex PARAMS((valueT mask));
826 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
827 static int has_suffix_p PARAMS((const char *, const char *));
828 static void clear_register_values PARAMS ((void));
829 static void print_dependency PARAMS ((const char *action, int depind));
830 static void instruction_serialization PARAMS ((void));
831 static void data_serialization PARAMS ((void));
832 static void remove_marked_resource PARAMS ((struct rsrc *));
833 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
834 static int is_taken_branch PARAMS ((struct ia64_opcode *));
835 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
836 static int depends_on PARAMS ((int, struct ia64_opcode *));
837 static int specify_resource PARAMS ((const struct ia64_dependency *,
838 struct ia64_opcode *, int, struct rsrc [], int, int));
839 static int check_dv PARAMS((struct ia64_opcode *idesc));
840 static void check_dependencies PARAMS((struct ia64_opcode *));
841 static void mark_resources PARAMS((struct ia64_opcode *));
842 static void update_dependencies PARAMS((struct ia64_opcode *));
843 static void note_register_values PARAMS((struct ia64_opcode *));
844 static int qp_mutex PARAMS ((int, int, int));
845 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
846 static void output_vbyte_mem PARAMS ((int, char *, char *));
847 static void count_output PARAMS ((int, char *, char *));
848 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
849 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
850 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
851 static void output_P1_format PARAMS ((vbyte_func, int));
852 static void output_P2_format PARAMS ((vbyte_func, int, int));
853 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
854 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
855 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
856 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
857 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
858 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
859 static void output_P9_format PARAMS ((vbyte_func, int, int));
860 static void output_P10_format PARAMS ((vbyte_func, int, int));
861 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
862 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
863 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
864 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
865 static char format_ab_reg PARAMS ((int, int));
866 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
868 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
869 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
871 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
872 static unw_rec_list *output_endp PARAMS ((void));
873 static unw_rec_list *output_prologue PARAMS ((void));
874 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
875 static unw_rec_list *output_body PARAMS ((void));
876 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
877 static unw_rec_list *output_mem_stack_v PARAMS ((void));
878 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
879 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
880 static unw_rec_list *output_rp_when PARAMS ((void));
881 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
882 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
883 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
884 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
885 static unw_rec_list *output_pfs_when PARAMS ((void));
886 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
887 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
888 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
889 static unw_rec_list *output_preds_when PARAMS ((void));
890 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
891 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
892 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
893 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
894 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
895 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
896 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
897 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
898 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
899 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
900 static unw_rec_list *output_unat_when PARAMS ((void));
901 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
902 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
903 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
904 static unw_rec_list *output_lc_when PARAMS ((void));
905 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
906 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
907 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
908 static unw_rec_list *output_fpsr_when PARAMS ((void));
909 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
910 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
911 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
912 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
913 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
914 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
915 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
916 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
917 static unw_rec_list *output_bsp_when PARAMS ((void));
918 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
919 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
920 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
921 static unw_rec_list *output_bspstore_when PARAMS ((void));
922 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
923 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
924 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
925 static unw_rec_list *output_rnat_when PARAMS ((void));
926 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
927 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
928 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
929 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
930 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
931 static unw_rec_list *output_label_state PARAMS ((unsigned long));
932 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
933 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
934 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
935 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
937 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
939 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
941 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
942 unsigned int, unsigned int));
943 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
944 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
945 static int calc_record_size PARAMS ((unw_rec_list *));
946 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
947 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
948 unsigned long, fragS *,
950 static unw_rec_list *optimize_unw_records PARAMS ((unw_rec_list *));
951 static void fixup_unw_records PARAMS ((unw_rec_list *, int));
952 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
953 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
954 static unsigned int get_saved_prologue_count PARAMS ((unsigned long));
955 static void save_prologue_count PARAMS ((unsigned long, unsigned int));
956 static void free_saved_prologue_counts PARAMS ((void));
958 /* Determine if application register REGNUM resides only in the integer
959 unit (as opposed to the memory unit). */
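/* For example, ar.pfs (AR 64) and ar.lc (AR 65) fall in this range,
   whereas ar.bsp (AR 17) and ar.rnat (AR 19) do not; the latter are
   covered by ar_is_only_in_memory_unit below.  */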
961 ar_is_only_in_integer_unit (int reg)
964 return reg >= 64 && reg <= 111;
967 /* Determine if application register REGNUM resides only in the memory
968 unit (as opposed to the integer unit). */
970 ar_is_only_in_memory_unit (int reg)
973 return reg >= 0 && reg <= 47;
976 /* Switch to section NAME and create section if necessary. It's
977 rather ugly that we have to manipulate input_line_pointer but I
978 don't see any other way to accomplish the same thing without
979 changing obj-elf.c (which may be the Right Thing, in the end). */
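/* For example, calling set_section with ".IA_64.unwind_info" would switch
   to (and, if necessary, create) that section, much as a
   ".section .IA_64.unwind_info" directive in the source would.  */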
984 char *saved_input_line_pointer;
986 saved_input_line_pointer = input_line_pointer;
987 input_line_pointer = name;
989 input_line_pointer = saved_input_line_pointer;
992 /* Map 's' to SHF_IA_64_SHORT. */
995 ia64_elf_section_letter (letter, ptr_msg)
1000 return SHF_IA_64_SHORT;
1001 else if (letter == 'o')
1002 return SHF_LINK_ORDER;
1004 *ptr_msg = _("Bad .section directive: want a,o,s,w,x,M,S,G,T in string");
1008 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
1011 ia64_elf_section_flags (flags, attr, type)
1013 int attr, type ATTRIBUTE_UNUSED;
1015 if (attr & SHF_IA_64_SHORT)
1016 flags |= SEC_SMALL_DATA;
1021 ia64_elf_section_type (str, len)
1025 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
1027 if (STREQ (ELF_STRING_ia64_unwind_info))
1028 return SHT_PROGBITS;
1030 if (STREQ (ELF_STRING_ia64_unwind_info_once))
1031 return SHT_PROGBITS;
1033 if (STREQ (ELF_STRING_ia64_unwind))
1034 return SHT_IA_64_UNWIND;
1036 if (STREQ (ELF_STRING_ia64_unwind_once))
1037 return SHT_IA_64_UNWIND;
1039 if (STREQ ("unwind"))
1040 return SHT_IA_64_UNWIND;
1047 set_regstack (ins, locs, outs, rots)
1048 unsigned int ins, locs, outs, rots;
1050 /* Size of frame. */
1053 sof = ins + locs + outs;
1056 as_bad ("Size of frame exceeds maximum of 96 registers");
1061 as_warn ("Size of rotating registers exceeds frame size");
1064 md.in.base = REG_GR + 32;
1065 md.loc.base = md.in.base + ins;
1066 md.out.base = md.loc.base + locs;
1068 md.in.num_regs = ins;
1069 md.loc.num_regs = locs;
1070 md.out.num_regs = outs;
1071 md.rot.num_regs = rots;
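/* For illustration, ".regstk 2, 3, 1, 0" yields sof == 6, with the input
   registers at r32-r33, locals at r34-r36, and the output register at
   r37, per the base/num_regs assignments above.  */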
1078 struct label_fix *lfix;
1080 subsegT saved_subseg;
1083 if (!md.last_text_seg)
1086 saved_seg = now_seg;
1087 saved_subseg = now_subseg;
1089 subseg_set (md.last_text_seg, 0);
1091 while (md.num_slots_in_use > 0)
1092 emit_one_bundle (); /* force out queued instructions */
1094 /* In case there are labels following the last instruction, resolve
1096 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
1098 S_SET_VALUE (lfix->sym, frag_now_fix ());
1099 symbol_set_frag (lfix->sym, frag_now);
1101 CURR_SLOT.label_fixups = 0;
1102 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
1104 S_SET_VALUE (lfix->sym, frag_now_fix ());
1105 symbol_set_frag (lfix->sym, frag_now);
1107 CURR_SLOT.tag_fixups = 0;
1109 /* In case there are unwind directives following the last instruction,
1110 resolve those now. We only handle prologue, body, and endp directives
1111 here. Give an error for others. */
1112 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
1114 switch (ptr->r.type)
1120 ptr->slot_number = (unsigned long) frag_more (0);
1121 ptr->slot_frag = frag_now;
1124 /* Allow any record which doesn't have a "t" field (i.e.,
1125 doesn't relate to a particular instruction). */
1141 as_bad (_("Unwind directive not followed by an instruction."));
1145 unwind.current_entry = NULL;
1147 subseg_set (saved_seg, saved_subseg);
1149 if (md.qp.X_op == O_register)
1150 as_bad ("qualifying predicate not followed by instruction");
1154 ia64_do_align (int nbytes)
1156 char *saved_input_line_pointer = input_line_pointer;
1158 input_line_pointer = "";
1159 s_align_bytes (nbytes);
1160 input_line_pointer = saved_input_line_pointer;
1164 ia64_cons_align (nbytes)
1169 char *saved_input_line_pointer = input_line_pointer;
1170 input_line_pointer = "";
1171 s_align_bytes (nbytes);
1172 input_line_pointer = saved_input_line_pointer;
1176 /* Output COUNT bytes to a memory location. */
1177 static char *vbyte_mem_ptr = NULL;
1180 output_vbyte_mem (count, ptr, comment)
1183 char *comment ATTRIBUTE_UNUSED;
1186 if (vbyte_mem_ptr == NULL)
1191 for (x = 0; x < count; x++)
1192 *(vbyte_mem_ptr++) = ptr[x];
1195 /* Count the number of bytes required for records. */
1196 static int vbyte_count = 0;
1198 count_output (count, ptr, comment)
1200 char *ptr ATTRIBUTE_UNUSED;
1201 char *comment ATTRIBUTE_UNUSED;
1203 vbyte_count += count;
1207 output_R1_format (f, rtype, rlen)
1209 unw_record_type rtype;
1216 output_R3_format (f, rtype, rlen);
1222 else if (rtype != prologue)
1223 as_bad ("record type is not valid");
1225 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1226 (*f) (1, &byte, NULL);
1230 output_R2_format (f, mask, grsave, rlen)
1237 mask = (mask & 0x0f);
1238 grsave = (grsave & 0x7f);
1240 bytes[0] = (UNW_R2 | (mask >> 1));
1241 bytes[1] = (((mask & 0x01) << 7) | grsave);
1242 count += output_leb128 (bytes + 2, rlen, 0);
1243 (*f) (count, bytes, NULL);
1247 output_R3_format (f, rtype, rlen)
1249 unw_record_type rtype;
1256 output_R1_format (f, rtype, rlen);
1262 else if (rtype != prologue)
1263 as_bad ("record type is not valid");
1264 bytes[0] = (UNW_R3 | r);
1265 count = output_leb128 (bytes + 1, rlen, 0);
1266 (*f) (count + 1, bytes, NULL);
1270 output_P1_format (f, brmask)
1275 byte = UNW_P1 | (brmask & 0x1f);
1276 (*f) (1, &byte, NULL);
1280 output_P2_format (f, brmask, gr)
1286 brmask = (brmask & 0x1f);
1287 bytes[0] = UNW_P2 | (brmask >> 1);
1288 bytes[1] = (((brmask & 1) << 7) | gr);
1289 (*f) (2, bytes, NULL);
1293 output_P3_format (f, rtype, reg)
1295 unw_record_type rtype;
1340 as_bad ("Invalid record type for P3 format.");
1342 bytes[0] = (UNW_P3 | (r >> 1));
1343 bytes[1] = (((r & 1) << 7) | reg);
1344 (*f) (2, bytes, NULL);
1348 output_P4_format (f, imask, imask_size)
1350 unsigned char *imask;
1351 unsigned long imask_size;
1354 (*f) (imask_size, (char *) imask, NULL);
1358 output_P5_format (f, grmask, frmask)
1361 unsigned long frmask;
1364 grmask = (grmask & 0x0f);
1367 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1368 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1369 bytes[3] = (frmask & 0x000000ff);
1370 (*f) (4, bytes, NULL);
1374 output_P6_format (f, rtype, rmask)
1376 unw_record_type rtype;
1382 if (rtype == gr_mem)
1384 else if (rtype != fr_mem)
1385 as_bad ("Invalid record type for format P6");
1386 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1387 (*f) (1, &byte, NULL);
1391 output_P7_format (f, rtype, w1, w2)
1393 unw_record_type rtype;
1400 count += output_leb128 (bytes + 1, w1, 0);
1405 count += output_leb128 (bytes + count, w2 >> 4, 0);
1455 bytes[0] = (UNW_P7 | r);
1456 (*f) (count, bytes, NULL);
1460 output_P8_format (f, rtype, t)
1462 unw_record_type rtype;
1501 case bspstore_psprel:
1504 case bspstore_sprel:
1516 case priunat_when_gr:
1519 case priunat_psprel:
1525 case priunat_when_mem:
1532 count += output_leb128 (bytes + 2, t, 0);
1533 (*f) (count, bytes, NULL);
1537 output_P9_format (f, grmask, gr)
1544 bytes[1] = (grmask & 0x0f);
1545 bytes[2] = (gr & 0x7f);
1546 (*f) (3, bytes, NULL);
1550 output_P10_format (f, abi, context)
1557 bytes[1] = (abi & 0xff);
1558 bytes[2] = (context & 0xff);
1559 (*f) (3, bytes, NULL);
1563 output_B1_format (f, rtype, label)
1565 unw_record_type rtype;
1566 unsigned long label;
1572 output_B4_format (f, rtype, label);
1575 if (rtype == copy_state)
1577 else if (rtype != label_state)
1578 as_bad ("Invalid record type for format B1");
1580 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1581 (*f) (1, &byte, NULL);
1585 output_B2_format (f, ecount, t)
1587 unsigned long ecount;
1594 output_B3_format (f, ecount, t);
1597 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1598 count += output_leb128 (bytes + 1, t, 0);
1599 (*f) (count, bytes, NULL);
1603 output_B3_format (f, ecount, t)
1605 unsigned long ecount;
1612 output_B2_format (f, ecount, t);
1616 count += output_leb128 (bytes + 1, t, 0);
1617 count += output_leb128 (bytes + count, ecount, 0);
1618 (*f) (count, bytes, NULL);
1622 output_B4_format (f, rtype, label)
1624 unw_record_type rtype;
1625 unsigned long label;
1632 output_B1_format (f, rtype, label);
1636 if (rtype == copy_state)
1638 else if (rtype != label_state)
1639 as_bad ("Invalid record type for format B4");
1641 bytes[0] = (UNW_B4 | (r << 3));
1642 count += output_leb128 (bytes + 1, label, 0);
1643 (*f) (count, bytes, NULL);
1647 format_ab_reg (ab, reg)
1654 ret = (ab << 5) | reg;
1659 output_X1_format (f, rtype, ab, reg, t, w1)
1661 unw_record_type rtype;
1671 if (rtype == spill_sprel)
1673 else if (rtype != spill_psprel)
1674 as_bad ("Invalid record type for format X1");
1675 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1676 count += output_leb128 (bytes + 2, t, 0);
1677 count += output_leb128 (bytes + count, w1, 0);
1678 (*f) (count, bytes, NULL);
1682 output_X2_format (f, ab, reg, x, y, treg, t)
1691 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1692 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1693 count += output_leb128 (bytes + 3, t, 0);
1694 (*f) (count, bytes, NULL);
1698 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1700 unw_record_type rtype;
1711 if (rtype == spill_sprel_p)
1713 else if (rtype != spill_psprel_p)
1714 as_bad ("Invalid record type for format X3");
1715 bytes[1] = ((r << 7) | (qp & 0x3f));
1716 bytes[2] = format_ab_reg (ab, reg);
1717 count += output_leb128 (bytes + 3, t, 0);
1718 count += output_leb128 (bytes + count, w1, 0);
1719 (*f) (count, bytes, NULL);
1723 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1733 bytes[1] = (qp & 0x3f);
1734 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1735 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1736 count += output_leb128 (bytes + 4, t, 0);
1737 (*f) (count, bytes, NULL);
1740 /* This function allocates a record list structure and initializes its fields. */
1742 static unw_rec_list *
1743 alloc_record (unw_record_type t)
1746 ptr = xmalloc (sizeof (*ptr));
1748 ptr->slot_number = SLOT_NUM_NOT_SET;
1750 ptr->next_slot_number = 0;
1751 ptr->next_slot_frag = 0;
1755 /* Dummy unwind record used for calculating the length of the last prologue or
1758 static unw_rec_list *
1761 unw_rec_list *ptr = alloc_record (endp);
1765 static unw_rec_list *
1768 unw_rec_list *ptr = alloc_record (prologue);
1769 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1773 static unw_rec_list *
1774 output_prologue_gr (saved_mask, reg)
1775 unsigned int saved_mask;
1778 unw_rec_list *ptr = alloc_record (prologue_gr);
1779 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1780 ptr->r.record.r.grmask = saved_mask;
1781 ptr->r.record.r.grsave = reg;
1785 static unw_rec_list *
1788 unw_rec_list *ptr = alloc_record (body);
1792 static unw_rec_list *
1793 output_mem_stack_f (size)
1796 unw_rec_list *ptr = alloc_record (mem_stack_f);
1797 ptr->r.record.p.size = size;
1801 static unw_rec_list *
1802 output_mem_stack_v ()
1804 unw_rec_list *ptr = alloc_record (mem_stack_v);
1808 static unw_rec_list *
1812 unw_rec_list *ptr = alloc_record (psp_gr);
1813 ptr->r.record.p.gr = gr;
1817 static unw_rec_list *
1818 output_psp_sprel (offset)
1819 unsigned int offset;
1821 unw_rec_list *ptr = alloc_record (psp_sprel);
1822 ptr->r.record.p.spoff = offset / 4;
1826 static unw_rec_list *
1829 unw_rec_list *ptr = alloc_record (rp_when);
1833 static unw_rec_list *
1837 unw_rec_list *ptr = alloc_record (rp_gr);
1838 ptr->r.record.p.gr = gr;
1842 static unw_rec_list *
1846 unw_rec_list *ptr = alloc_record (rp_br);
1847 ptr->r.record.p.br = br;
1851 static unw_rec_list *
1852 output_rp_psprel (offset)
1853 unsigned int offset;
1855 unw_rec_list *ptr = alloc_record (rp_psprel);
1856 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1860 static unw_rec_list *
1861 output_rp_sprel (offset)
1862 unsigned int offset;
1864 unw_rec_list *ptr = alloc_record (rp_sprel);
1865 ptr->r.record.p.spoff = offset / 4;
1869 static unw_rec_list *
1872 unw_rec_list *ptr = alloc_record (pfs_when);
1876 static unw_rec_list *
1880 unw_rec_list *ptr = alloc_record (pfs_gr);
1881 ptr->r.record.p.gr = gr;
1885 static unw_rec_list *
1886 output_pfs_psprel (offset)
1887 unsigned int offset;
1889 unw_rec_list *ptr = alloc_record (pfs_psprel);
1890 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1894 static unw_rec_list *
1895 output_pfs_sprel (offset)
1896 unsigned int offset;
1898 unw_rec_list *ptr = alloc_record (pfs_sprel);
1899 ptr->r.record.p.spoff = offset / 4;
1903 static unw_rec_list *
1904 output_preds_when ()
1906 unw_rec_list *ptr = alloc_record (preds_when);
1910 static unw_rec_list *
1911 output_preds_gr (gr)
1914 unw_rec_list *ptr = alloc_record (preds_gr);
1915 ptr->r.record.p.gr = gr;
1919 static unw_rec_list *
1920 output_preds_psprel (offset)
1921 unsigned int offset;
1923 unw_rec_list *ptr = alloc_record (preds_psprel);
1924 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1928 static unw_rec_list *
1929 output_preds_sprel (offset)
1930 unsigned int offset;
1932 unw_rec_list *ptr = alloc_record (preds_sprel);
1933 ptr->r.record.p.spoff = offset / 4;
1937 static unw_rec_list *
1938 output_fr_mem (mask)
1941 unw_rec_list *ptr = alloc_record (fr_mem);
1942 ptr->r.record.p.rmask = mask;
1946 static unw_rec_list *
1947 output_frgr_mem (gr_mask, fr_mask)
1948 unsigned int gr_mask;
1949 unsigned int fr_mask;
1951 unw_rec_list *ptr = alloc_record (frgr_mem);
1952 ptr->r.record.p.grmask = gr_mask;
1953 ptr->r.record.p.frmask = fr_mask;
1957 static unw_rec_list *
1958 output_gr_gr (mask, reg)
1962 unw_rec_list *ptr = alloc_record (gr_gr);
1963 ptr->r.record.p.grmask = mask;
1964 ptr->r.record.p.gr = reg;
1968 static unw_rec_list *
1969 output_gr_mem (mask)
1972 unw_rec_list *ptr = alloc_record (gr_mem);
1973 ptr->r.record.p.rmask = mask;
1977 static unw_rec_list *
1978 output_br_mem (unsigned int mask)
1980 unw_rec_list *ptr = alloc_record (br_mem);
1981 ptr->r.record.p.brmask = mask;
1985 static unw_rec_list *
1986 output_br_gr (save_mask, reg)
1987 unsigned int save_mask;
1990 unw_rec_list *ptr = alloc_record (br_gr);
1991 ptr->r.record.p.brmask = save_mask;
1992 ptr->r.record.p.gr = reg;
1996 static unw_rec_list *
1997 output_spill_base (offset)
1998 unsigned int offset;
2000 unw_rec_list *ptr = alloc_record (spill_base);
2001 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2005 static unw_rec_list *
2008 unw_rec_list *ptr = alloc_record (unat_when);
2012 static unw_rec_list *
2016 unw_rec_list *ptr = alloc_record (unat_gr);
2017 ptr->r.record.p.gr = gr;
2021 static unw_rec_list *
2022 output_unat_psprel (offset)
2023 unsigned int offset;
2025 unw_rec_list *ptr = alloc_record (unat_psprel);
2026 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2030 static unw_rec_list *
2031 output_unat_sprel (offset)
2032 unsigned int offset;
2034 unw_rec_list *ptr = alloc_record (unat_sprel);
2035 ptr->r.record.p.spoff = offset / 4;
2039 static unw_rec_list *
2042 unw_rec_list *ptr = alloc_record (lc_when);
2046 static unw_rec_list *
2050 unw_rec_list *ptr = alloc_record (lc_gr);
2051 ptr->r.record.p.gr = gr;
2055 static unw_rec_list *
2056 output_lc_psprel (offset)
2057 unsigned int offset;
2059 unw_rec_list *ptr = alloc_record (lc_psprel);
2060 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2064 static unw_rec_list *
2065 output_lc_sprel (offset)
2066 unsigned int offset;
2068 unw_rec_list *ptr = alloc_record (lc_sprel);
2069 ptr->r.record.p.spoff = offset / 4;
2073 static unw_rec_list *
2076 unw_rec_list *ptr = alloc_record (fpsr_when);
2080 static unw_rec_list *
2084 unw_rec_list *ptr = alloc_record (fpsr_gr);
2085 ptr->r.record.p.gr = gr;
2089 static unw_rec_list *
2090 output_fpsr_psprel (offset)
2091 unsigned int offset;
2093 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2094 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2098 static unw_rec_list *
2099 output_fpsr_sprel (offset)
2100 unsigned int offset;
2102 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2103 ptr->r.record.p.spoff = offset / 4;
2107 static unw_rec_list *
2108 output_priunat_when_gr ()
2110 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2114 static unw_rec_list *
2115 output_priunat_when_mem ()
2117 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2121 static unw_rec_list *
2122 output_priunat_gr (gr)
2125 unw_rec_list *ptr = alloc_record (priunat_gr);
2126 ptr->r.record.p.gr = gr;
2130 static unw_rec_list *
2131 output_priunat_psprel (offset)
2132 unsigned int offset;
2134 unw_rec_list *ptr = alloc_record (priunat_psprel);
2135 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2139 static unw_rec_list *
2140 output_priunat_sprel (offset)
2141 unsigned int offset;
2143 unw_rec_list *ptr = alloc_record (priunat_sprel);
2144 ptr->r.record.p.spoff = offset / 4;
2148 static unw_rec_list *
2151 unw_rec_list *ptr = alloc_record (bsp_when);
2155 static unw_rec_list *
2159 unw_rec_list *ptr = alloc_record (bsp_gr);
2160 ptr->r.record.p.gr = gr;
2164 static unw_rec_list *
2165 output_bsp_psprel (offset)
2166 unsigned int offset;
2168 unw_rec_list *ptr = alloc_record (bsp_psprel);
2169 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2173 static unw_rec_list *
2174 output_bsp_sprel (offset)
2175 unsigned int offset;
2177 unw_rec_list *ptr = alloc_record (bsp_sprel);
2178 ptr->r.record.p.spoff = offset / 4;
2182 static unw_rec_list *
2183 output_bspstore_when ()
2185 unw_rec_list *ptr = alloc_record (bspstore_when);
2189 static unw_rec_list *
2190 output_bspstore_gr (gr)
2193 unw_rec_list *ptr = alloc_record (bspstore_gr);
2194 ptr->r.record.p.gr = gr;
2198 static unw_rec_list *
2199 output_bspstore_psprel (offset)
2200 unsigned int offset;
2202 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2203 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2207 static unw_rec_list *
2208 output_bspstore_sprel (offset)
2209 unsigned int offset;
2211 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2212 ptr->r.record.p.spoff = offset / 4;
2216 static unw_rec_list *
2219 unw_rec_list *ptr = alloc_record (rnat_when);
2223 static unw_rec_list *
2227 unw_rec_list *ptr = alloc_record (rnat_gr);
2228 ptr->r.record.p.gr = gr;
2232 static unw_rec_list *
2233 output_rnat_psprel (offset)
2234 unsigned int offset;
2236 unw_rec_list *ptr = alloc_record (rnat_psprel);
2237 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2241 static unw_rec_list *
2242 output_rnat_sprel (offset)
2243 unsigned int offset;
2245 unw_rec_list *ptr = alloc_record (rnat_sprel);
2246 ptr->r.record.p.spoff = offset / 4;
2250 static unw_rec_list *
2251 output_unwabi (abi, context)
2253 unsigned long context;
2255 unw_rec_list *ptr = alloc_record (unwabi);
2256 ptr->r.record.p.abi = abi;
2257 ptr->r.record.p.context = context;
2261 static unw_rec_list *
2262 output_epilogue (unsigned long ecount)
2264 unw_rec_list *ptr = alloc_record (epilogue);
2265 ptr->r.record.b.ecount = ecount;
2269 static unw_rec_list *
2270 output_label_state (unsigned long label)
2272 unw_rec_list *ptr = alloc_record (label_state);
2273 ptr->r.record.b.label = label;
2277 static unw_rec_list *
2278 output_copy_state (unsigned long label)
2280 unw_rec_list *ptr = alloc_record (copy_state);
2281 ptr->r.record.b.label = label;
2285 static unw_rec_list *
2286 output_spill_psprel (ab, reg, offset)
2289 unsigned int offset;
2291 unw_rec_list *ptr = alloc_record (spill_psprel);
2292 ptr->r.record.x.ab = ab;
2293 ptr->r.record.x.reg = reg;
2294 ptr->r.record.x.pspoff = ENCODED_PSP_OFFSET (offset);
2298 static unw_rec_list *
2299 output_spill_sprel (ab, reg, offset)
2302 unsigned int offset;
2304 unw_rec_list *ptr = alloc_record (spill_sprel);
2305 ptr->r.record.x.ab = ab;
2306 ptr->r.record.x.reg = reg;
2307 ptr->r.record.x.spoff = offset / 4;
2311 static unw_rec_list *
2312 output_spill_psprel_p (ab, reg, offset, predicate)
2315 unsigned int offset;
2316 unsigned int predicate;
2318 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2319 ptr->r.record.x.ab = ab;
2320 ptr->r.record.x.reg = reg;
2321 ptr->r.record.x.pspoff = ENCODED_PSP_OFFSET (offset);
2322 ptr->r.record.x.qp = predicate;
2326 static unw_rec_list *
2327 output_spill_sprel_p (ab, reg, offset, predicate)
2330 unsigned int offset;
2331 unsigned int predicate;
2333 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2334 ptr->r.record.x.ab = ab;
2335 ptr->r.record.x.reg = reg;
2336 ptr->r.record.x.spoff = offset / 4;
2337 ptr->r.record.x.qp = predicate;
2341 static unw_rec_list *
2342 output_spill_reg (ab, reg, targ_reg, xy)
2345 unsigned int targ_reg;
2348 unw_rec_list *ptr = alloc_record (spill_reg);
2349 ptr->r.record.x.ab = ab;
2350 ptr->r.record.x.reg = reg;
2351 ptr->r.record.x.treg = targ_reg;
2352 ptr->r.record.x.xy = xy;
2356 static unw_rec_list *
2357 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2360 unsigned int targ_reg;
2362 unsigned int predicate;
2364 unw_rec_list *ptr = alloc_record (spill_reg_p);
2365 ptr->r.record.x.ab = ab;
2366 ptr->r.record.x.reg = reg;
2367 ptr->r.record.x.treg = targ_reg;
2368 ptr->r.record.x.xy = xy;
2369 ptr->r.record.x.qp = predicate;
2373 /* Given a unw_rec_list, output it in the correct format using the
2374 specified function. */
2377 process_one_record (ptr, f)
2381 unsigned long fr_mask, gr_mask;
2383 switch (ptr->r.type)
2385 /* This is a dummy record that takes up no space in the output. */
2393 /* These are taken care of by prologue/prologue_gr. */
2398 if (ptr->r.type == prologue_gr)
2399 output_R2_format (f, ptr->r.record.r.grmask,
2400 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2402 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2404 /* Output descriptor(s) for union of register spills (if any). */
2405 gr_mask = ptr->r.record.r.mask.gr_mem;
2406 fr_mask = ptr->r.record.r.mask.fr_mem;
2409 if ((fr_mask & ~0xfUL) == 0)
2410 output_P6_format (f, fr_mem, fr_mask);
2413 output_P5_format (f, gr_mask, fr_mask);
2418 output_P6_format (f, gr_mem, gr_mask);
2419 if (ptr->r.record.r.mask.br_mem)
2420 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2422 /* output imask descriptor if necessary: */
2423 if (ptr->r.record.r.mask.i)
2424 output_P4_format (f, ptr->r.record.r.mask.i,
2425 ptr->r.record.r.imask_size);
2429 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2433 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2434 ptr->r.record.p.size);
2447 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2450 output_P3_format (f, rp_br, ptr->r.record.p.br);
2453 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2461 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2470 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2480 case bspstore_sprel:
2482 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2485 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2488 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2491 as_bad ("spill_mask record unimplemented.");
2493 case priunat_when_gr:
2494 case priunat_when_mem:
2498 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2500 case priunat_psprel:
2502 case bspstore_psprel:
2504 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2507 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2510 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2514 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2517 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2518 ptr->r.record.x.reg, ptr->r.record.x.t,
2519 ptr->r.record.x.pspoff);
2522 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2523 ptr->r.record.x.reg, ptr->r.record.x.t,
2524 ptr->r.record.x.spoff);
2527 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2528 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2529 ptr->r.record.x.treg, ptr->r.record.x.t);
2531 case spill_psprel_p:
2532 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2533 ptr->r.record.x.ab, ptr->r.record.x.reg,
2534 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2537 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2538 ptr->r.record.x.ab, ptr->r.record.x.reg,
2539 ptr->r.record.x.t, ptr->r.record.x.spoff);
2542 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2543 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2544 ptr->r.record.x.xy, ptr->r.record.x.treg,
2548 as_bad ("record type is not valid");
2553 /* Given a list of unw_rec_list records, process each record with
2554 the specified function. */
2556 process_unw_records (list, f)
2561 for (ptr = list; ptr; ptr = ptr->next)
2562 process_one_record (ptr, f);
2565 /* Determine the size of a record list in bytes. */
2567 calc_record_size (list)
2571 process_unw_records (list, count_output);
2575 /* Update IMASK bitmask to reflect the fact that one or more registers
2576 of type TYPE are saved starting at instruction with index T. If N
2577 bits are set in REGMASK, it is assumed that instructions T through
2578 T+N-1 save these registers.
2582 1: instruction saves next fp reg
2583 2: instruction saves next general reg
2584 3: instruction saves next branch reg */
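/* For instance, with t == 5 the code below computes
   pos = 2 * (3 - 5 % 4) == 4, so the two-bit TYPE value occupies bits
   5:4 of its imask byte; four instruction slots are packed per byte,
   starting from the most significant pair of bits.  */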
2586 set_imask (region, regmask, t, type)
2587 unw_rec_list *region;
2588 unsigned long regmask;
2592 unsigned char *imask;
2593 unsigned long imask_size;
2597 imask = region->r.record.r.mask.i;
2598 imask_size = region->r.record.r.imask_size;
2601 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2602 imask = xmalloc (imask_size);
2603 memset (imask, 0, imask_size);
2605 region->r.record.r.imask_size = imask_size;
2606 region->r.record.r.mask.i = imask;
2610 pos = 2 * (3 - t % 4);
2613 if (i >= imask_size)
2615 as_bad ("Ignoring attempt to spill beyond end of region");
2619 imask[i] |= (type & 0x3) << pos;
2621 regmask &= (regmask - 1);
2631 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2632 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2633 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2637 slot_index (slot_addr, slot_frag, first_addr, first_frag, before_relax)
2638 unsigned long slot_addr;
2640 unsigned long first_addr;
2644 unsigned long index = 0;
2646 /* The first time we are called, the initial address and frag are invalid. */
2647 if (first_addr == 0)
2650 /* If the two addresses are in different frags, then we need to add in
2651 the remaining size of this frag, and then the entire size of intermediate
2653 while (slot_frag != first_frag)
2655 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2659 /* We can get the final addresses only during and after
2661 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2662 index += 3 * ((first_frag->fr_next->fr_address
2663 - first_frag->fr_address
2664 - first_frag->fr_fix) >> 4);
2667 /* We don't know what the final addresses will be. We try our
2668 best to estimate. */
2669 switch (first_frag->fr_type)
2675 as_fatal ("only constant space allocation is supported");
2681 /* Take alignment into account. Assume the worst case
2682 before relaxation. */
2683 index += 3 * ((1 << first_frag->fr_offset) >> 4);
2687 if (first_frag->fr_symbol)
2689 as_fatal ("only constant offsets are supported");
2693 index += 3 * (first_frag->fr_offset >> 4);
2697 /* Add in the full size of the frag converted to instruction slots. */
2698 index += 3 * (first_frag->fr_fix >> 4);
2699 /* Subtract away the initial part before first_addr. */
2700 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2701 + ((first_addr & 0x3) - (start_addr & 0x3)));
2703 /* Move to the beginning of the next frag. */
2704 first_frag = first_frag->fr_next;
2705 first_addr = (unsigned long) &first_frag->fr_literal;
2708 /* Add in the used part of the last frag. */
2709 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2710 + ((slot_addr & 0x3) - (first_addr & 0x3)));
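/* Illustrative check of the arithmetic above (not in the original sources):
   each 16-byte bundle holds three instruction slots, so two complete
   bundles contribute 2 * 3 = 6 slots, and the low two bits of a slot
   address select the slot within its bundle.  */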
2714 /* Optimize unwind record directives. */
2716 static unw_rec_list *
2717 optimize_unw_records (list)
2723 /* If the only unwind record is ".prologue" or ".prologue" followed
2724 by ".body", then we can optimize the unwind directives away. */
2725 if (list->r.type == prologue
2726 && (list->next->r.type == endp
2727 || (list->next->r.type == body && list->next->next->r.type == endp)))
2733 /* Given a complete record list, process any records which have
2734 unresolved fields, (ie length counts for a prologue). After
2735 this has been run, all necessary information should be available
2736 within each record to generate an image. */
2739 fixup_unw_records (list, before_relax)
2743 unw_rec_list *ptr, *region = 0;
2744 unsigned long first_addr = 0, rlen = 0, t;
2745 fragS *first_frag = 0;
2747 for (ptr = list; ptr; ptr = ptr->next)
2749 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2750 as_bad ("Insn slot not set in unwind record.");
2751 t = slot_index (ptr->slot_number, ptr->slot_frag,
2752 first_addr, first_frag, before_relax);
2753 switch (ptr->r.type)
2761 unsigned long last_addr = 0;
2762 fragS *last_frag = NULL;
2764 first_addr = ptr->slot_number;
2765 first_frag = ptr->slot_frag;
2766 /* Find either the next body/prologue start, or the end of
2767 the function, and determine the size of the region. */
2768 for (last = ptr->next; last != NULL; last = last->next)
2769 if (last->r.type == prologue || last->r.type == prologue_gr
2770 || last->r.type == body || last->r.type == endp)
2772 last_addr = last->slot_number;
2773 last_frag = last->slot_frag;
2776 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2778 rlen = ptr->r.record.r.rlen = size;
2779 if (ptr->r.type == body)
2780 /* End of region. */
2788 ptr->r.record.b.t = rlen - 1 - t;
2790 /* This happens when a memory-stack-less procedure uses a
2791 ".restore sp" directive at the end of a region to pop
2793 ptr->r.record.b.t = 0;
2804 case priunat_when_gr:
2805 case priunat_when_mem:
2809 ptr->r.record.p.t = t;
2817 case spill_psprel_p:
2818 ptr->r.record.x.t = t;
2824 as_bad ("frgr_mem record before region record!");
2827 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2828 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2829 set_imask (region, ptr->r.record.p.frmask, t, 1);
2830 set_imask (region, ptr->r.record.p.grmask, t, 2);
2835 as_bad ("fr_mem record before region record!");
2838 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2839 set_imask (region, ptr->r.record.p.rmask, t, 1);
2844 as_bad ("gr_mem record before region record!");
2847 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2848 set_imask (region, ptr->r.record.p.rmask, t, 2);
2853 as_bad ("br_mem record before region record!");
2856 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2857 set_imask (region, ptr->r.record.p.brmask, t, 3);
2863 as_bad ("gr_gr record before region record!");
2866 set_imask (region, ptr->r.record.p.grmask, t, 2);
2871 as_bad ("br_gr record before region record!");
2874 set_imask (region, ptr->r.record.p.brmask, t, 3);
2883 /* Estimate the size of a frag before relaxing. We only have one type of frag
2884 to handle here, which is the unwind info frag. */
2887 ia64_estimate_size_before_relax (fragS *frag,
2888 asection *segtype ATTRIBUTE_UNUSED)
2893 /* ??? This code is identical to the first part of ia64_convert_frag. */
2894 list = (unw_rec_list *) frag->fr_opcode;
2895 fixup_unw_records (list, 0);
2897 len = calc_record_size (list);
2898 /* pad to pointer-size boundary. */
2899 pad = len % md.pointer_size;
2901 len += md.pointer_size - pad;
2902 /* Add 8 for the header. */
2904 /* Add a pointer for the personality offset. */
2905 if (frag->fr_offset)
2906 size += md.pointer_size;
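/* Worked example (illustrative only): with 8-byte pointers, a 13-byte
   record list is padded to 16 bytes, the 8-byte header brings the size to
   24, and a personality routine adds another 8 bytes for a total of 32.  */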
2908 /* fr_var carries the max_chars that we created the fragment with.
2909 We must, of course, have allocated enough memory earlier. */
2910 assert (frag->fr_var >= size);
2912 return frag->fr_fix + size;
2915 /* This function converts a rs_machine_dependent variant frag into a
2916 normal fill frag with the unwind image from the record list. */
2918 ia64_convert_frag (fragS *frag)
2924 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2925 list = (unw_rec_list *) frag->fr_opcode;
2926 fixup_unw_records (list, 0);
2928 len = calc_record_size (list);
2929 /* pad to pointer-size boundary. */
2930 pad = len % md.pointer_size;
2932 len += md.pointer_size - pad;
2933 /* Add 8 for the header. */
2935 /* Add a pointer for the personality offset. */
2936 if (frag->fr_offset)
2937 size += md.pointer_size;
2939 /* fr_var carries the max_chars that we created the fragment with.
2940 We must, of course, have allocated enough memory earlier. */
2941 assert (frag->fr_var >= size);
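/* Illustrative sketch of the header word built below: for LP64, a 24-byte
   descriptor area and a personality routine would yield
   (1 << 48) | (3 << 32) | 3, i.e. version 1, the two handler flag bits,
   and a length of three 8-byte words.  */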
2943 /* Initialize the header area. fr_offset is initialized with
2944 unwind.personality_routine. */
2945 if (frag->fr_offset)
2947 if (md.flags & EF_IA_64_ABI64)
2948 flag_value = (bfd_vma) 3 << 32;
2950 /* 32-bit unwind info block. */
2951 flag_value = (bfd_vma) 0x1003 << 32;
2956 md_number_to_chars (frag->fr_literal,
2957 (((bfd_vma) 1 << 48) /* Version. */
2958 | flag_value /* U & E handler flags. */
2959 | (len / md.pointer_size)), /* Length. */
2962 /* Skip the header. */
2963 vbyte_mem_ptr = frag->fr_literal + 8;
2964 process_unw_records (list, output_vbyte_mem);
2966 /* Fill the padding bytes with zeros. */
2968 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
2969 md.pointer_size - pad);
2971 frag->fr_fix += size;
2972 frag->fr_type = rs_fill;
2974 frag->fr_offset = 0;
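/* Convert a register expression into the (ab, reg) pair used by unwind
   spill records.  A sketch of the encoding, inferred from the cases below:
   ab 0 = preserved general registers (r4-r7), ab 1 = preserved
   floating-point registers, ab 2 = preserved branch registers, and
   ab 3 = special registers such as rp, ar.pfs or ar.lc, with reg giving
   the register number or code within that class.  */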
2978 convert_expr_to_ab_reg (e, ab, regp)
2985 if (e->X_op != O_register)
2988 reg = e->X_add_number;
2989 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2992 *regp = reg - REG_GR;
2994 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2995 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2998 *regp = reg - REG_FR;
3000 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3003 *regp = reg - REG_BR;
3010 case REG_PR: *regp = 0; break;
3011 case REG_PSP: *regp = 1; break;
3012 case REG_PRIUNAT: *regp = 2; break;
3013 case REG_BR + 0: *regp = 3; break;
3014 case REG_AR + AR_BSP: *regp = 4; break;
3015 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3016 case REG_AR + AR_RNAT: *regp = 6; break;
3017 case REG_AR + AR_UNAT: *regp = 7; break;
3018 case REG_AR + AR_FPSR: *regp = 8; break;
3019 case REG_AR + AR_PFS: *regp = 9; break;
3020 case REG_AR + AR_LC: *regp = 10; break;
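/* Convert a register expression into the (xy, reg) pair used to describe a
   spill target register.  A sketch inferred from the cases below:
   xy 0 = general register, xy 1 = floating-point register, xy 2 = branch
   register, with reg giving the register number within that file.  */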
3030 convert_expr_to_xy_reg (e, xy, regp)
3037 if (e->X_op != O_register)
3040 reg = e->X_add_number;
3042 if (/* reg >= REG_GR && */ reg <= (REG_GR + 127))
3045 *regp = reg - REG_GR;
3047 else if (reg >= REG_FR && reg <= (REG_FR + 127))
3050 *regp = reg - REG_FR;
3052 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3055 *regp = reg - REG_BR;
3065 /* The current frag is an alignment frag. */
3066 align_frag = frag_now;
3067 s_align_bytes (arg);
3072 int dummy ATTRIBUTE_UNUSED;
3077 radix = *input_line_pointer++;
3079 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
3081 as_bad ("Radix `%c' unsupported", radix);
3082 ignore_rest_of_line ();
3087 /* Helper function for .loc directives. If the assembler is not generating
3088 line number info, then we need to remember which instructions have a .loc
3089 directive, and only call dwarf2_gen_line_info for those instructions. */
3094 CURR_SLOT.loc_directive_seen = 1;
3095 dwarf2_directive_loc (x);
3098 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3100 dot_special_section (which)
3103 set_section ((char *) special_section_name[which]);
3106 /* Return -1 for warning and 0 for error. */
3109 unwind_diagnostic (const char * region, const char *directive)
3111 if (md.unwind_check == unwind_check_warning)
3113 as_warn (".%s outside of %s", directive, region);
3118 as_bad (".%s outside of %s", directive, region);
3119 ignore_rest_of_line ();
3124 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3125 a procedure but the unwind directive check is set to warning, 0 if
3126 a directive isn't in a procedure and the unwind directive check is set
3130 in_procedure (const char *directive)
3132 if (unwind.proc_start
3133 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3135 return unwind_diagnostic ("procedure", directive);
3138 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3139 a prologue but the unwind directive check is set to warning, 0 if
3140 a directive isn't in a prologue and the unwind directive check is set
3144 in_prologue (const char *directive)
3146 int in = in_procedure (directive);
3149 /* We are in a procedure. Check if we are in a prologue. */
3150 if (unwind.prologue)
3152 /* We only want to issue one message. */
3154 return unwind_diagnostic ("prologue", directive);
3161 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3162 a body but the unwind directive check is set to warning, 0 if
3163 a directive isn't in a body and the unwind directive check is set
3167 in_body (const char *directive)
3169 int in = in_procedure (directive);
3172 /* We are in a procedure. Check if we are in a body. */
3175 /* We only want to issue one message. */
3177 return unwind_diagnostic ("body region", directive);
3185 add_unwind_entry (ptr)
3189 unwind.tail->next = ptr;
3194 /* The current entry can in fact be a chain of unwind entries. */
3195 if (unwind.current_entry == NULL)
3196 unwind.current_entry = ptr;
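/* Handle .fframe, which declares a fixed-size memory stack frame.  A
   typical (illustrative) pairing with the allocating instruction:
	.fframe 16
	adds sp = -16, sp  */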
3201 int dummy ATTRIBUTE_UNUSED;
3205 if (!in_prologue ("fframe"))
3210 if (e.X_op != O_constant)
3211 as_bad ("Operand to .fframe must be a constant");
3213 add_unwind_entry (output_mem_stack_f (e.X_add_number));
3218 int dummy ATTRIBUTE_UNUSED;
3223 if (!in_prologue ("vframe"))
3227 reg = e.X_add_number - REG_GR;
3228 if (e.X_op == O_register && reg < 128)
3230 add_unwind_entry (output_mem_stack_v ());
3231 if (! (unwind.prologue_mask & 2))
3232 add_unwind_entry (output_psp_gr (reg));
3235 as_bad ("First operand to .vframe must be a general register");
3239 dot_vframesp (dummy)
3240 int dummy ATTRIBUTE_UNUSED;
3244 if (!in_prologue ("vframesp"))
3248 if (e.X_op == O_constant)
3250 add_unwind_entry (output_mem_stack_v ());
3251 add_unwind_entry (output_psp_sprel (e.X_add_number));
3254 as_bad ("Operand to .vframesp must be a constant (sp-relative offset)");
3258 dot_vframepsp (dummy)
3259 int dummy ATTRIBUTE_UNUSED;
3263 if (!in_prologue ("vframepsp"))
3267 if (e.X_op == O_constant)
3269 add_unwind_entry (output_mem_stack_v ());
3270 add_unwind_entry (output_psp_sprel (e.X_add_number));
3273 as_bad ("Operand to .vframepsp must be a constant (psp-relative offset)");
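/* Handle .save reg, gr, which records that the named special register
   (rp, pr, or an ar.xxx register) is preserved in a general register.
   The directive is normally written just before the instruction that
   performs the save; an illustrative example:
	.save ar.pfs, r34
	alloc r34 = ar.pfs, 2, 2, 2, 0  */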
3278 int dummy ATTRIBUTE_UNUSED;
3284 if (!in_prologue ("save"))
3287 sep = parse_operand (&e1);
3289 as_bad ("No second operand to .save");
3290 sep = parse_operand (&e2);
3292 reg1 = e1.X_add_number;
3293 reg2 = e2.X_add_number - REG_GR;
3295 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3296 if (e1.X_op == O_register)
3298 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
3302 case REG_AR + AR_BSP:
3303 add_unwind_entry (output_bsp_when ());
3304 add_unwind_entry (output_bsp_gr (reg2));
3306 case REG_AR + AR_BSPSTORE:
3307 add_unwind_entry (output_bspstore_when ());
3308 add_unwind_entry (output_bspstore_gr (reg2));
3310 case REG_AR + AR_RNAT:
3311 add_unwind_entry (output_rnat_when ());
3312 add_unwind_entry (output_rnat_gr (reg2));
3314 case REG_AR + AR_UNAT:
3315 add_unwind_entry (output_unat_when ());
3316 add_unwind_entry (output_unat_gr (reg2));
3318 case REG_AR + AR_FPSR:
3319 add_unwind_entry (output_fpsr_when ());
3320 add_unwind_entry (output_fpsr_gr (reg2));
3322 case REG_AR + AR_PFS:
3323 add_unwind_entry (output_pfs_when ());
3324 if (! (unwind.prologue_mask & 4))
3325 add_unwind_entry (output_pfs_gr (reg2));
3327 case REG_AR + AR_LC:
3328 add_unwind_entry (output_lc_when ());
3329 add_unwind_entry (output_lc_gr (reg2));
3332 add_unwind_entry (output_rp_when ());
3333 if (! (unwind.prologue_mask & 8))
3334 add_unwind_entry (output_rp_gr (reg2));
3337 add_unwind_entry (output_preds_when ());
3338 if (! (unwind.prologue_mask & 1))
3339 add_unwind_entry (output_preds_gr (reg2));
3342 add_unwind_entry (output_priunat_when_gr ());
3343 add_unwind_entry (output_priunat_gr (reg2));
3346 as_bad ("First operand not a valid register");
3350 as_bad ("Second operand not a valid register");
3353 as_bad ("First operand not a register");
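/* Handle .restore sp [, ecount], which marks the point where the previous
   frame state is popped; ecount gives the number of additional nested
   prologue regions to close.  An illustrative epilogue fragment:
	.restore sp
	mov sp = r35  */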
3358 int dummy ATTRIBUTE_UNUSED;
3361 unsigned long ecount; /* # of _additional_ regions to pop */
3364 if (!in_body ("restore"))
3367 sep = parse_operand (&e1);
3368 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3370 as_bad ("First operand to .restore must be stack pointer (sp)");
3376 parse_operand (&e2);
3377 if (e2.X_op != O_constant || e2.X_add_number < 0)
3379 as_bad ("Second operand to .restore must be a constant >= 0");
3382 ecount = e2.X_add_number;
3385 ecount = unwind.prologue_count - 1;
3387 if (ecount >= unwind.prologue_count)
3389 as_bad ("Epilogue count of %lu exceeds number of nested prologues (%u)",
3390 ecount + 1, unwind.prologue_count);
3394 add_unwind_entry (output_epilogue (ecount));
3396 if (ecount < unwind.prologue_count)
3397 unwind.prologue_count -= ecount + 1;
3399 unwind.prologue_count = 0;
3403 dot_restorereg (dummy)
3404 int dummy ATTRIBUTE_UNUSED;
3406 unsigned int ab, reg;
3409 if (!in_procedure ("restorereg"))
3414 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3416 as_bad ("First operand to .restorereg must be a preserved register");
3419 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3423 dot_restorereg_p (dummy)
3424 int dummy ATTRIBUTE_UNUSED;
3426 unsigned int qp, ab, reg;
3430 if (!in_procedure ("restorereg.p"))
3433 sep = parse_operand (&e1);
3436 as_bad ("No second operand to .restorereg.p");
3440 parse_operand (&e2);
3442 qp = e1.X_add_number - REG_P;
3443 if (e1.X_op != O_register || qp > 63)
3445 as_bad ("First operand to .restorereg.p must be a predicate");
3449 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3451 as_bad ("Second operand to .restorereg.p must be a preserved register");
3454 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3457 static char *special_linkonce_name[] =
3459 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3463 start_unwind_section (const segT text_seg, int sec_index)
3466 Use a slightly ugly scheme to derive the unwind section names from
3467 the text section name:
3469 	text sect.            unwind table sect.
3470 	name:                 name:                      comments:
3471 	----------            -----------------          --------------------------------
3472 	.text                 .IA_64.unwind
3473 	.text.foo             .IA_64.unwind.text.foo
3474 	.foo                  .IA_64.unwind.foo
3475 	.gnu.linkonce.t.foo
3476 	                      .gnu.linkonce.ia64unw.foo
3477 	_info                 .IA_64.unwind_info         gas issues error message (ditto)
3478 	_infoFOO              .IA_64.unwind_infoFOO      gas issues error message (ditto)
3480 This mapping is done so that:
3482 (a) An object file with unwind info only in .text will use
3483 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3484 This follows the letter of the ABI and also ensures backwards
3485 compatibility with older toolchains.
3487 (b) An object file with unwind info in multiple text sections
3488 will use separate unwind sections for each text section.
3489 This allows us to properly set the "sh_info" and "sh_link"
3490 fields in SHT_IA_64_UNWIND as required by the ABI and also
3491 lets GNU ld support programs with multiple segments
3492 containing unwind info (as might be the case for certain
3493 embedded applications).
3495 (c) An error is issued if there would be a name clash.
3498 const char *text_name, *sec_text_name;
3500 const char *prefix = special_section_name [sec_index];
3502 size_t prefix_len, suffix_len, sec_name_len;
3504 sec_text_name = segment_name (text_seg);
3505 text_name = sec_text_name;
3506 if (strncmp (text_name, "_info", 5) == 0)
3508 as_bad ("Illegal section name `%s' (causes unwind section name clash)",
3510 ignore_rest_of_line ();
3513 if (strcmp (text_name, ".text") == 0)
3516 /* Build the unwind section name by appending the (possibly stripped)
3517 text section name to the unwind prefix. */
3519 if (strncmp (text_name, ".gnu.linkonce.t.",
3520 sizeof (".gnu.linkonce.t.") - 1) == 0)
3522 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3523 suffix += sizeof (".gnu.linkonce.t.") - 1;
3526 prefix_len = strlen (prefix);
3527 suffix_len = strlen (suffix);
3528 sec_name_len = prefix_len + suffix_len;
3529 sec_name = alloca (sec_name_len + 1);
3530 memcpy (sec_name, prefix, prefix_len);
3531 memcpy (sec_name + prefix_len, suffix, suffix_len);
3532 sec_name [sec_name_len] = '\0';
3534 /* Handle COMDAT group. */
3535 if (suffix == text_name && (text_seg->flags & SEC_LINK_ONCE) != 0)
3538 size_t len, group_name_len;
3539 const char *group_name = elf_group_name (text_seg);
3541 if (group_name == NULL)
3543 as_bad ("Group section `%s' has no group signature",
3545 ignore_rest_of_line ();
3548 /* We have to construct a fake section directive. */
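	  /* For example (illustrative only), a sec_name of
	     .IA_64.unwind.text.foo in COMDAT group "foo" yields the
	     directive string
	     .IA_64.unwind.text.foo,"aG",@progbits,foo,comdat  */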
3549 group_name_len = strlen (group_name);
3551 + 16 /* ,"aG",@progbits, */
3552 + group_name_len /* ,group_name */
3555 section = alloca (len + 1);
3556 memcpy (section, sec_name, sec_name_len);
3557 memcpy (section + sec_name_len, ",\"aG\",@progbits,", 16);
3558 memcpy (section + sec_name_len + 16, group_name, group_name_len);
3559 memcpy (section + len - 7, ",comdat", 7);
3560 section [len] = '\0';
3561 set_section (section);
3565 set_section (sec_name);
3566 bfd_set_section_flags (stdoutput, now_seg,
3567 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3570 elf_linked_to_section (now_seg) = text_seg;
3574 generate_unwind_image (const segT text_seg)
3579 /* Mark the end of the unwind info, so that we can compute the size of the
3580 last unwind region. */
3581 add_unwind_entry (output_endp ());
3583 /* Force out pending instructions, to make sure all unwind records have
3584 a valid slot_number field. */
3585 ia64_flush_insns ();
3587 /* Generate the unwind record. */
3588 list = optimize_unw_records (unwind.list);
3589 fixup_unw_records (list, 1);
3590 size = calc_record_size (list);
3592 if (size > 0 || unwind.force_unwind_entry)
3594 unwind.force_unwind_entry = 0;
3595 /* pad to pointer-size boundary. */
3596 pad = size % md.pointer_size;
3598 size += md.pointer_size - pad;
3599 /* Add 8 for the header. */
3601 /* Add a pointer for the personality offset. */
3602 if (unwind.personality_routine)
3603 size += md.pointer_size;
3606 /* If there are unwind records, switch sections, and output the info. */
3610 bfd_reloc_code_real_type reloc;
3612 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3614 /* Make sure the section has 4 byte alignment for ILP32 and
3615 8 byte alignment for LP64. */
3616 frag_align (md.pointer_size_shift, 0, 0);
3617 record_alignment (now_seg, md.pointer_size_shift);
3619 /* Set expression which points to start of unwind descriptor area. */
3620 unwind.info = expr_build_dot ();
3622 frag_var (rs_machine_dependent, size, size, 0, 0,
3623 (offsetT) (long) unwind.personality_routine,
3626 /* Add the personality address to the image. */
3627 if (unwind.personality_routine != 0)
3629 exp.X_op = O_symbol;
3630 exp.X_add_symbol = unwind.personality_routine;
3631 exp.X_add_number = 0;
3633 if (md.flags & EF_IA_64_BE)
3635 if (md.flags & EF_IA_64_ABI64)
3636 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3638 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3642 if (md.flags & EF_IA_64_ABI64)
3643 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3645 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3648 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3649 md.pointer_size, &exp, 0, reloc);
3650 unwind.personality_routine = 0;
3654 free_saved_prologue_counts ();
3655 unwind.list = unwind.tail = unwind.current_entry = NULL;
3659 dot_handlerdata (dummy)
3660 int dummy ATTRIBUTE_UNUSED;
3662 if (!in_procedure ("handlerdata"))
3664 unwind.force_unwind_entry = 1;
3666 /* Remember which segment we're in so we can switch back after .endp */
3667 unwind.saved_text_seg = now_seg;
3668 unwind.saved_text_subseg = now_subseg;
3670 /* Generate unwind info into unwind-info section and then leave that
3671 section as the currently active one so dataXX directives go into
3672 the language specific data area of the unwind info block. */
3673 generate_unwind_image (now_seg);
3674 demand_empty_rest_of_line ();
3678 dot_unwentry (dummy)
3679 int dummy ATTRIBUTE_UNUSED;
3681 if (!in_procedure ("unwentry"))
3683 unwind.force_unwind_entry = 1;
3684 demand_empty_rest_of_line ();
3689 int dummy ATTRIBUTE_UNUSED;
3694 if (!in_prologue ("altrp"))
3698 reg = e.X_add_number - REG_BR;
3699 if (e.X_op == O_register && reg < 8)
3700 add_unwind_entry (output_rp_br (reg));
3702 as_bad ("First operand not a valid branch register");
3706 dot_savemem (psprel)
3713 if (!in_prologue (psprel ? "savepsp" : "savesp"))
3716 sep = parse_operand (&e1);
3718 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3719 sep = parse_operand (&e2);
3721 reg1 = e1.X_add_number;
3722 val = e2.X_add_number;
3724 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3725 if (e1.X_op == O_register)
3727 if (e2.X_op == O_constant)
3731 case REG_AR + AR_BSP:
3732 add_unwind_entry (output_bsp_when ());
3733 add_unwind_entry ((psprel
3735 : output_bsp_sprel) (val));
3737 case REG_AR + AR_BSPSTORE:
3738 add_unwind_entry (output_bspstore_when ());
3739 add_unwind_entry ((psprel
3740 ? output_bspstore_psprel
3741 : output_bspstore_sprel) (val));
3743 case REG_AR + AR_RNAT:
3744 add_unwind_entry (output_rnat_when ());
3745 add_unwind_entry ((psprel
3746 ? output_rnat_psprel
3747 : output_rnat_sprel) (val));
3749 case REG_AR + AR_UNAT:
3750 add_unwind_entry (output_unat_when ());
3751 add_unwind_entry ((psprel
3752 ? output_unat_psprel
3753 : output_unat_sprel) (val));
3755 case REG_AR + AR_FPSR:
3756 add_unwind_entry (output_fpsr_when ());
3757 add_unwind_entry ((psprel
3758 ? output_fpsr_psprel
3759 : output_fpsr_sprel) (val));
3761 case REG_AR + AR_PFS:
3762 add_unwind_entry (output_pfs_when ());
3763 add_unwind_entry ((psprel
3765 : output_pfs_sprel) (val));
3767 case REG_AR + AR_LC:
3768 add_unwind_entry (output_lc_when ());
3769 add_unwind_entry ((psprel
3771 : output_lc_sprel) (val));
3774 add_unwind_entry (output_rp_when ());
3775 add_unwind_entry ((psprel
3777 : output_rp_sprel) (val));
3780 add_unwind_entry (output_preds_when ());
3781 add_unwind_entry ((psprel
3782 ? output_preds_psprel
3783 : output_preds_sprel) (val));
3786 add_unwind_entry (output_priunat_when_mem ());
3787 add_unwind_entry ((psprel
3788 ? output_priunat_psprel
3789 : output_priunat_sprel) (val));
3792 as_bad ("First operand not a valid register");
3796 as_bad ("Second operand not a valid constant");
3799 as_bad ("First operand not a register");
3804 int dummy ATTRIBUTE_UNUSED;
3809 if (!in_prologue ("save.g"))
3812 sep = parse_operand (&e1);
3814 parse_operand (&e2);
3816 if (e1.X_op != O_constant)
3817 as_bad ("First operand to .save.g must be a constant.");
3820 int grmask = e1.X_add_number;
3822 add_unwind_entry (output_gr_mem (grmask));
3825 int reg = e2.X_add_number - REG_GR;
3826 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3827 add_unwind_entry (output_gr_gr (grmask, reg));
3829 as_bad ("Second operand is an invalid register.");
3836 int dummy ATTRIBUTE_UNUSED;
3841 if (!in_prologue ("save.f"))
3844 sep = parse_operand (&e1);
3846 if (e1.X_op != O_constant)
3847 as_bad ("Operand to .save.f must be a constant.");
3849 add_unwind_entry (output_fr_mem (e1.X_add_number));
3854 int dummy ATTRIBUTE_UNUSED;
3861 if (!in_prologue ("save.b"))
3864 sep = parse_operand (&e1);
3865 if (e1.X_op != O_constant)
3867 as_bad ("First operand to .save.b must be a constant.");
3870 brmask = e1.X_add_number;
3874 sep = parse_operand (&e2);
3875 reg = e2.X_add_number - REG_GR;
3876 if (e2.X_op != O_register || reg > 127)
3878 as_bad ("Second operand to .save.b must be a general register.");
3881 add_unwind_entry (output_br_gr (brmask, e2.X_add_number));
3884 add_unwind_entry (output_br_mem (brmask));
3886 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3887 demand_empty_rest_of_line ();
3892 int dummy ATTRIBUTE_UNUSED;
3897 if (!in_prologue ("save.gf"))
3900 sep = parse_operand (&e1);
3902 parse_operand (&e2);
3904 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3905 as_bad ("Both operands of .save.gf must be constants.");
3908 int grmask = e1.X_add_number;
3909 int frmask = e2.X_add_number;
3910 add_unwind_entry (output_frgr_mem (grmask, frmask));
3916 int dummy ATTRIBUTE_UNUSED;
3921 if (!in_prologue ("spill"))
3924 sep = parse_operand (&e);
3925 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3926 demand_empty_rest_of_line ();
3928 if (e.X_op != O_constant)
3929 as_bad ("Operand to .spill must be a constant");
3931 add_unwind_entry (output_spill_base (e.X_add_number));
3935 dot_spillreg (dummy)
3936 int dummy ATTRIBUTE_UNUSED;
3939 unsigned int ab, xy, reg, treg;
3942 if (!in_procedure ("spillreg"))
3945 sep = parse_operand (&e1);
3948 as_bad ("No second operand to .spillreg");
3952 parse_operand (&e2);
3954 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3956 as_bad ("First operand to .spillreg must be a preserved register");
3960 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3962 as_bad ("Second operand to .spillreg must be a register");
3966 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3970 dot_spillmem (psprel)
3975 unsigned int ab, reg;
3977 if (!in_procedure ("spillmem"))
3980 sep = parse_operand (&e1);
3983 as_bad ("Second operand missing");
3987 parse_operand (&e2);
3989 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3991 as_bad ("First operand to .spill%s must be a preserved register",
3992 psprel ? "psp" : "sp");
3996 if (e2.X_op != O_constant)
3998 as_bad ("Second operand to .spill%s must be a constant",
3999 psprel ? "psp" : "sp");
4004 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
4006 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
4010 dot_spillreg_p (dummy)
4011 int dummy ATTRIBUTE_UNUSED;
4014 unsigned int ab, xy, reg, treg;
4015 expressionS e1, e2, e3;
4018 if (!in_procedure ("spillreg.p"))
4021 sep = parse_operand (&e1);
4024 as_bad ("No second and third operand to .spillreg.p");
4028 sep = parse_operand (&e2);
4031 as_bad ("No third operand to .spillreg.p");
4035 parse_operand (&e3);
4037 qp = e1.X_add_number - REG_P;
4039 if (e1.X_op != O_register || qp > 63)
4041 as_bad ("First operand to .spillreg.p must be a predicate");
4045 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
4047 as_bad ("Second operand to .spillreg.p must be a preserved register");
4051 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
4053 as_bad ("Third operand to .spillreg.p must be a register");
4057 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
4061 dot_spillmem_p (psprel)
4064 expressionS e1, e2, e3;
4066 unsigned int ab, reg;
4069 if (!in_procedure ("spillmem.p"))
4072 sep = parse_operand (&e1);
4075 as_bad ("Second operand missing");
4079 parse_operand (&e2);
4082 as_bad ("Third operand missing");
4086 parse_operand (&e3);
4088 qp = e1.X_add_number - REG_P;
4089 if (e1.X_op != O_register || qp > 63)
4091 as_bad ("First operand to .spill%s.p must be a predicate",
4092 psprel ? "psp" : "sp");
4096 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
4098 as_bad ("Second operand to .spill%s.p must be a preserved register",
4099 psprel ? "psp" : "sp");
4103 if (e3.X_op != O_constant)
4105 as_bad ("Third operand to .spill%s.p must be a constant",
4106 psprel ? "psp" : "sp");
4111 add_unwind_entry (output_spill_psprel_p (ab, reg, e3.X_add_number, qp));
4113 add_unwind_entry (output_spill_sprel_p (ab, reg, e3.X_add_number, qp));
4117 get_saved_prologue_count (lbl)
4120 label_prologue_count *lpc = unwind.saved_prologue_counts;
4122 while (lpc != NULL && lpc->label_number != lbl)
4126 return lpc->prologue_count;
4128 as_bad ("Missing .label_state %ld", lbl);
4133 save_prologue_count (lbl, count)
4137 label_prologue_count *lpc = unwind.saved_prologue_counts;
4139 while (lpc != NULL && lpc->label_number != lbl)
4143 lpc->prologue_count = count;
4146 label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc));
4148 new_lpc->next = unwind.saved_prologue_counts;
4149 new_lpc->label_number = lbl;
4150 new_lpc->prologue_count = count;
4151 unwind.saved_prologue_counts = new_lpc;
4156 free_saved_prologue_counts ()
4158 label_prologue_count *lpc = unwind.saved_prologue_counts;
4159 label_prologue_count *next;
4168 unwind.saved_prologue_counts = NULL;
4172 dot_label_state (dummy)
4173 int dummy ATTRIBUTE_UNUSED;
4177 if (!in_body ("label_state"))
4181 if (e.X_op != O_constant)
4183 as_bad ("Operand to .label_state must be a constant");
4186 add_unwind_entry (output_label_state (e.X_add_number));
4187 save_prologue_count (e.X_add_number, unwind.prologue_count);
4191 dot_copy_state (dummy)
4192 int dummy ATTRIBUTE_UNUSED;
4196 if (!in_body ("copy_state"))
4200 if (e.X_op != O_constant)
4202 as_bad ("Operand to .copy_state must be a constant");
4205 add_unwind_entry (output_copy_state (e.X_add_number));
4206 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4211 int dummy ATTRIBUTE_UNUSED;
4216 if (!in_procedure ("unwabi"))
4219 sep = parse_operand (&e1);
4222 as_bad ("Second operand to .unwabi missing");
4225 sep = parse_operand (&e2);
4226 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
4227 demand_empty_rest_of_line ();
4229 if (e1.X_op != O_constant)
4231 as_bad ("First operand to .unwabi must be a constant");
4235 if (e2.X_op != O_constant)
4237 as_bad ("Second operand to .unwabi must be a constant");
4241 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
4245 dot_personality (dummy)
4246 int dummy ATTRIBUTE_UNUSED;
4249 if (!in_procedure ("personality"))
4252 name = input_line_pointer;
4253 c = get_symbol_end ();
4254 p = input_line_pointer;
4255 unwind.personality_routine = symbol_find_or_make (name);
4256 unwind.force_unwind_entry = 1;
4259 demand_empty_rest_of_line ();
4264 int dummy ATTRIBUTE_UNUSED;
4269 unwind.proc_start = 0;
4270 /* Parse names of main and alternate entry points and mark them as
4271 function symbols: */
4275 name = input_line_pointer;
4276 c = get_symbol_end ();
4277 p = input_line_pointer;
4279 as_bad ("Empty argument of .proc");
4282 sym = symbol_find_or_make (name);
4283 if (S_IS_DEFINED (sym))
4284 as_bad ("`%s' was already defined", name);
4285 else if (unwind.proc_start == 0)
4287 unwind.proc_start = sym;
4289 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4293 if (*input_line_pointer != ',')
4295 ++input_line_pointer;
4297 if (unwind.proc_start == 0)
4298 unwind.proc_start = expr_build_dot ();
4299 demand_empty_rest_of_line ();
4302 unwind.prologue = 0;
4303 unwind.prologue_count = 0;
4306 unwind.list = unwind.tail = unwind.current_entry = NULL;
4307 unwind.personality_routine = 0;
4312 int dummy ATTRIBUTE_UNUSED;
4314 if (!in_procedure ("body"))
4316 if (!unwind.prologue && !unwind.body && unwind.insn)
4317 as_warn ("Initial .body should precede any instructions");
4319 unwind.prologue = 0;
4320 unwind.prologue_mask = 0;
4323 add_unwind_entry (output_body ());
4324 demand_empty_rest_of_line ();
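/* Handle .prologue [mask, grsave]: mask is a 4-bit field (1 = predicates,
   2 = psp, 4 = ar.pfs, 8 = rp, as tested against unwind.prologue_mask
   elsewhere in this file) naming registers saved in general registers
   starting at grsave.  Illustrative example:
	.prologue 12, 34
   i.e. rp and ar.pfs are saved, beginning in r34.  */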
4328 dot_prologue (dummy)
4329 int dummy ATTRIBUTE_UNUSED;
4332 int mask = 0, grsave = 0;
4334 if (!in_procedure ("prologue"))
4336 if (unwind.prologue)
4338 as_bad (".prologue within prologue");
4339 ignore_rest_of_line ();
4342 if (!unwind.body && unwind.insn)
4343 as_warn ("Initial .prologue should precede any instructions");
4345 if (!is_it_end_of_statement ())
4348 sep = parse_operand (&e1);
4350 as_bad ("No second operand to .prologue");
4351 sep = parse_operand (&e2);
4352 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
4353 demand_empty_rest_of_line ();
4355 if (e1.X_op == O_constant)
4357 mask = e1.X_add_number;
4359 if (e2.X_op == O_constant)
4360 grsave = e2.X_add_number;
4361 else if (e2.X_op == O_register
4362 && (grsave = e2.X_add_number - REG_GR) < 128)
4365 as_bad ("Second operand not a constant or general register");
4367 add_unwind_entry (output_prologue_gr (mask, grsave));
4370 as_bad ("First operand not a constant");
4373 add_unwind_entry (output_prologue ());
4375 unwind.prologue = 1;
4376 unwind.prologue_mask = mask;
4378 ++unwind.prologue_count;
4383 int dummy ATTRIBUTE_UNUSED;
4387 int bytes_per_address;
4390 subsegT saved_subseg;
4391 char *name, *default_name, *p, c;
4393 int unwind_check = md.unwind_check;
4395 md.unwind_check = unwind_check_error;
4396 if (!in_procedure ("endp"))
4398 md.unwind_check = unwind_check;
4400 if (unwind.saved_text_seg)
4402 saved_seg = unwind.saved_text_seg;
4403 saved_subseg = unwind.saved_text_subseg;
4404 unwind.saved_text_seg = NULL;
4408 saved_seg = now_seg;
4409 saved_subseg = now_subseg;
4412 insn_group_break (1, 0, 0);
4414 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4416 generate_unwind_image (saved_seg);
4418 if (unwind.info || unwind.force_unwind_entry)
4422 subseg_set (md.last_text_seg, 0);
4423 proc_end = expr_build_dot ();
4425 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4427 /* Make sure that section has 4 byte alignment for ILP32 and
4428 8 byte alignment for LP64. */
4429 record_alignment (now_seg, md.pointer_size_shift);
4431 /* Need space for 3 pointers for procedure start, procedure end,
4433 ptr = frag_more (3 * md.pointer_size);
4434 where = frag_now_fix () - (3 * md.pointer_size);
4435 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4437 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4438 e.X_op = O_pseudo_fixup;
4439 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4441 e.X_add_symbol = unwind.proc_start;
4442 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
4444 e.X_op = O_pseudo_fixup;
4445 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4447 e.X_add_symbol = proc_end;
4448 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4449 bytes_per_address, &e);
4453 e.X_op = O_pseudo_fixup;
4454 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4456 e.X_add_symbol = unwind.info;
4457 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4458 bytes_per_address, &e);
4461 md_number_to_chars (ptr + (bytes_per_address * 2), 0,
4465 subseg_set (saved_seg, saved_subseg);
4467 if (unwind.proc_start)
4468 default_name = (char *) S_GET_NAME (unwind.proc_start);
4470 default_name = NULL;
4472 /* Parse names of main and alternate entry points and set symbol sizes. */
4476 name = input_line_pointer;
4477 c = get_symbol_end ();
4478 p = input_line_pointer;
4481 if (md.unwind_check == unwind_check_warning)
4485 as_warn ("Empty argument of .endp. Use the default name `%s'",
4487 name = default_name;
4490 as_warn ("Empty argument of .endp");
4493 as_bad ("Empty argument of .endp");
4497 sym = symbol_find (name);
4499 && md.unwind_check == unwind_check_warning
4501 && default_name != name)
4503 /* We have a bad name. Try the default one if needed. */
4504 as_warn ("`%s' was not defined within procedure. Use the default name `%s'",
4505 name, default_name);
4506 name = default_name;
4507 sym = symbol_find (name);
4509 if (!sym || !S_IS_DEFINED (sym))
4510 as_bad ("`%s' was not defined within procedure", name);
4511 else if (unwind.proc_start
4512 && (symbol_get_bfdsym (sym)->flags & BSF_FUNCTION)
4513 && S_GET_SIZE (sym) == 0 && symbol_get_obj (sym)->size == NULL)
4515 fragS *fr = symbol_get_frag (unwind.proc_start);
4516 fragS *frag = symbol_get_frag (sym);
4518 /* Check whether the function label is at or beyond last
4520 while (fr && fr != frag)
4524 if (frag == frag_now && SEG_NORMAL (now_seg))
4525 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4528 symbol_get_obj (sym)->size =
4529 (expressionS *) xmalloc (sizeof (expressionS));
4530 symbol_get_obj (sym)->size->X_op = O_subtract;
4531 symbol_get_obj (sym)->size->X_add_symbol
4532 = symbol_new (FAKE_LABEL_NAME, now_seg,
4533 frag_now_fix (), frag_now);
4534 symbol_get_obj (sym)->size->X_op_symbol = sym;
4535 symbol_get_obj (sym)->size->X_add_number = 0;
4542 if (*input_line_pointer != ',')
4544 ++input_line_pointer;
4546 demand_empty_rest_of_line ();
4547 unwind.proc_start = unwind.info = 0;
4551 dot_template (template)
4554 CURR_SLOT.user_template = template;
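/* Handle .regstk ins, locs, outs, rots, which describes the current
   register stack frame.  An illustrative use matching a corresponding
   alloc instruction:
	.regstk 2, 1, 1, 0
	alloc r34 = ar.pfs, 2, 1, 1, 0  */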
4559 int dummy ATTRIBUTE_UNUSED;
4561 int ins, locs, outs, rots;
4563 if (is_it_end_of_statement ())
4564 ins = locs = outs = rots = 0;
4567 ins = get_absolute_expression ();
4568 if (*input_line_pointer++ != ',')
4570 locs = get_absolute_expression ();
4571 if (*input_line_pointer++ != ',')
4573 outs = get_absolute_expression ();
4574 if (*input_line_pointer++ != ',')
4576 rots = get_absolute_expression ();
4578 set_regstack (ins, locs, outs, rots);
4582 as_bad ("Comma expected");
4583 ignore_rest_of_line ();
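/* Handle .rotr/.rotf/.rotp, which give symbolic names to groups of
   rotating registers.  Illustrative example:
	.rotr in[4], out[4]
   names eight rotating general registers starting at r32, so in[0] is r32
   and out[0] is r36.  */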
4590 unsigned num_regs, num_alloced = 0;
4591 struct dynreg **drpp, *dr;
4592 int ch, base_reg = 0;
4598 case DYNREG_GR: base_reg = REG_GR + 32; break;
4599 case DYNREG_FR: base_reg = REG_FR + 32; break;
4600 case DYNREG_PR: base_reg = REG_P + 16; break;
4604 /* First, remove existing names from hash table. */
4605 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4607 hash_delete (md.dynreg_hash, dr->name);
4608 /* FIXME: Free dr->name. */
4612 drpp = &md.dynreg[type];
4615 start = input_line_pointer;
4616 ch = get_symbol_end ();
4617 len = strlen (ia64_canonicalize_symbol_name (start));
4618 *input_line_pointer = ch;
4621 if (*input_line_pointer != '[')
4623 as_bad ("Expected '['");
4626 ++input_line_pointer; /* skip '[' */
4628 num_regs = get_absolute_expression ();
4630 if (*input_line_pointer++ != ']')
4632 as_bad ("Expected ']'");
4637 num_alloced += num_regs;
4641 if (num_alloced > md.rot.num_regs)
4643 as_bad ("Used more than the declared %d rotating registers",
4649 if (num_alloced > 96)
4651 as_bad ("Used more than the available 96 rotating registers");
4656 if (num_alloced > 48)
4658 as_bad ("Used more than the available 48 rotating registers");
4669 *drpp = obstack_alloc (&notes, sizeof (*dr));
4670 memset (*drpp, 0, sizeof (*dr));
4673 name = obstack_alloc (&notes, len + 1);
4674 memcpy (name, start, len);
4679 dr->num_regs = num_regs;
4680 dr->base = base_reg;
4682 base_reg += num_regs;
4684 if (hash_insert (md.dynreg_hash, name, dr))
4686 as_bad ("Attempt to redefine register set `%s'", name);
4687 obstack_free (&notes, name);
4691 if (*input_line_pointer != ',')
4693 ++input_line_pointer; /* skip comma */
4696 demand_empty_rest_of_line ();
4700 ignore_rest_of_line ();
4704 dot_byteorder (byteorder)
4707 segment_info_type *seginfo = seg_info (now_seg);
4709 if (byteorder == -1)
4711 if (seginfo->tc_segment_info_data.endian == 0)
4712 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4713 byteorder = seginfo->tc_segment_info_data.endian == 1;
4716 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4718 if (target_big_endian != byteorder)
4720 target_big_endian = byteorder;
4721 if (target_big_endian)
4723 ia64_number_to_chars = number_to_chars_bigendian;
4724 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4728 ia64_number_to_chars = number_to_chars_littleendian;
4729 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4736 int dummy ATTRIBUTE_UNUSED;
4743 option = input_line_pointer;
4744 ch = get_symbol_end ();
4745 if (strcmp (option, "lsb") == 0)
4746 md.flags &= ~EF_IA_64_BE;
4747 else if (strcmp (option, "msb") == 0)
4748 md.flags |= EF_IA_64_BE;
4749 else if (strcmp (option, "abi32") == 0)
4750 md.flags &= ~EF_IA_64_ABI64;
4751 else if (strcmp (option, "abi64") == 0)
4752 md.flags |= EF_IA_64_ABI64;
4754 as_bad ("Unknown psr option `%s'", option);
4755 *input_line_pointer = ch;
4758 if (*input_line_pointer != ',')
4761 ++input_line_pointer;
4764 demand_empty_rest_of_line ();
4769 int dummy ATTRIBUTE_UNUSED;
4771 new_logical_line (0, get_absolute_expression ());
4772 demand_empty_rest_of_line ();
4776 cross_section (ref, cons, ua)
4778 void (*cons) PARAMS((int));
4782 int saved_auto_align;
4783 unsigned int section_count;
4786 start = input_line_pointer;
4792 name = demand_copy_C_string (&len);
4793 obstack_free (&notes, name);
4796 ignore_rest_of_line ();
4802 char c = get_symbol_end ();
4804 if (input_line_pointer == start)
4806 as_bad ("Missing section name");
4807 ignore_rest_of_line ();
4810 *input_line_pointer = c;
4812 end = input_line_pointer;
4814 if (*input_line_pointer != ',')
4816 as_bad ("Comma expected after section name");
4817 ignore_rest_of_line ();
4821 end = input_line_pointer + 1; /* skip comma */
4822 input_line_pointer = start;
4823 md.keep_pending_output = 1;
4824 section_count = bfd_count_sections(stdoutput);
4825 obj_elf_section (0);
4826 if (section_count != bfd_count_sections(stdoutput))
4827 as_warn ("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated.");
4828 input_line_pointer = end;
4829 saved_auto_align = md.auto_align;
4834 md.auto_align = saved_auto_align;
4835 obj_elf_previous (0);
4836 md.keep_pending_output = 0;
4843 cross_section (size, cons, 0);
4846 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4849 stmt_float_cons (kind)
4870 ia64_do_align (alignment);
4878 int saved_auto_align = md.auto_align;
4882 md.auto_align = saved_auto_align;
4886 dot_xfloat_cons (kind)
4889 cross_section (kind, stmt_float_cons, 0);
4893 dot_xstringer (zero)
4896 cross_section (zero, stringer, 0);
4903 cross_section (size, cons, 1);
4907 dot_xfloat_cons_ua (kind)
4910 cross_section (kind, float_cons, 1);
4913 /* .reg.val <regname>,value */
4917 int dummy ATTRIBUTE_UNUSED;
4922 if (reg.X_op != O_register)
4924 as_bad (_("Register name expected"));
4925 ignore_rest_of_line ();
4927 else if (*input_line_pointer++ != ',')
4929 as_bad (_("Comma expected"));
4930 ignore_rest_of_line ();
4934 valueT value = get_absolute_expression ();
4935 int regno = reg.X_add_number;
4936 if (regno <= REG_GR || regno > REG_GR + 127)
4937 as_warn (_("Register value annotation ignored"));
4940 gr_values[regno - REG_GR].known = 1;
4941 gr_values[regno - REG_GR].value = value;
4942 gr_values[regno - REG_GR].path = md.path;
4945 demand_empty_rest_of_line ();
4950 .serialize.instruction
4953 dot_serialize (type)
4956 insn_group_break (0, 0, 0);
4958 instruction_serialization ();
4960 data_serialization ();
4961 insn_group_break (0, 0, 0);
4962 demand_empty_rest_of_line ();
4965 /* select dv checking mode
4970 A stop is inserted when changing modes
4977 if (md.manual_bundling)
4978 as_warn (_("Directive invalid within a bundle"));
4980 if (type == 'E' || type == 'A')
4981 md.mode_explicitly_set = 0;
4983 md.mode_explicitly_set = 1;
4990 if (md.explicit_mode)
4991 insn_group_break (1, 0, 0);
4992 md.explicit_mode = 0;
4996 if (!md.explicit_mode)
4997 insn_group_break (1, 0, 0);
4998 md.explicit_mode = 1;
5002 if (md.explicit_mode != md.default_explicit_mode)
5003 insn_group_break (1, 0, 0);
5004 md.explicit_mode = md.default_explicit_mode;
5005 md.mode_explicitly_set = 0;
5016 for (regno = 0; regno < 64; regno++)
5018 if (mask & ((valueT) 1 << regno))
5020 fprintf (stderr, "%s p%d", comma, regno);
5027 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
5028 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
5029 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
5030 .pred.safe_across_calls p1 [, p2 [,...]]
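   An illustrative use (not part of the original comment):
	.pred.rel.mutex p6, p7
	(p6) mov r8 = 1
	(p7) mov r8 = 2
   tells the dependency checker that p6 and p7 can never both be true, so
   the two writes to r8 cannot conflict.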
5039 int p1 = -1, p2 = -1;
5043 if (*input_line_pointer == '"')
5046 char *form = demand_copy_C_string (&len);
5048 if (strcmp (form, "mutex") == 0)
5050 else if (strcmp (form, "clear") == 0)
5052 else if (strcmp (form, "imply") == 0)
5054 obstack_free (&notes, form);
5056 else if (*input_line_pointer == '@')
5058 char *form = ++input_line_pointer;
5059 char c = get_symbol_end();
5061 if (strcmp (form, "mutex") == 0)
5063 else if (strcmp (form, "clear") == 0)
5065 else if (strcmp (form, "imply") == 0)
5067 *input_line_pointer = c;
5071 as_bad (_("Missing predicate relation type"));
5072 ignore_rest_of_line ();
5077 as_bad (_("Unrecognized predicate relation type"));
5078 ignore_rest_of_line ();
5081 if (*input_line_pointer == ',')
5082 ++input_line_pointer;
5091 expressionS pr, *pr1, *pr2;
5094 if (pr.X_op == O_register
5095 && pr.X_add_number >= REG_P
5096 && pr.X_add_number <= REG_P + 63)
5098 regno = pr.X_add_number - REG_P;
5106 else if (type != 'i'
5107 && pr.X_op == O_subtract
5108 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5109 && pr1->X_op == O_register
5110 && pr1->X_add_number >= REG_P
5111 && pr1->X_add_number <= REG_P + 63
5112 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5113 && pr2->X_op == O_register
5114 && pr2->X_add_number >= REG_P
5115 && pr2->X_add_number <= REG_P + 63)
5120 regno = pr1->X_add_number - REG_P;
5121 stop = pr2->X_add_number - REG_P;
5124 as_bad (_("Bad register range"));
5125 ignore_rest_of_line ();
5128 bits = ((bits << stop) << 1) - (bits << regno);
5129 count += stop - regno + 1;
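	  /* If bits is 1 at this point, the expression above forms a mask
	     with bits regno..stop set; e.g. a range of p3-p5 yields a mask
	     with bits 3 through 5 set.  */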
5133 as_bad (_("Predicate register expected"));
5134 ignore_rest_of_line ();
5138 as_warn (_("Duplicate predicate register ignored"));
5140 if (*input_line_pointer != ',')
5142 ++input_line_pointer;
5151 clear_qp_mutex (mask);
5152 clear_qp_implies (mask, (valueT) 0);
5155 if (count != 2 || p1 == -1 || p2 == -1)
5156 as_bad (_("Predicate source and target required"));
5157 else if (p1 == 0 || p2 == 0)
5158 as_bad (_("Use of p0 is not valid in this context"));
5160 add_qp_imply (p1, p2);
5165 as_bad (_("At least two PR arguments expected"));
5170 as_bad (_("Use of p0 is not valid in this context"));
5173 add_qp_mutex (mask);
5176 /* note that we don't override any existing relations */
5179 as_bad (_("At least one PR argument expected"));
5184 fprintf (stderr, "Safe across calls: ");
5185 print_prmask (mask);
5186 fprintf (stderr, "\n");
5188 qp_safe_across_calls = mask;
5191 demand_empty_rest_of_line ();
5194 /* .entry label [, label [, ...]]
5195 Hint to DV code that the given labels are to be considered entry points.
5196 Otherwise, only global labels are considered entry points. */
5200 int dummy ATTRIBUTE_UNUSED;
5209 name = input_line_pointer;
5210 c = get_symbol_end ();
5211 symbolP = symbol_find_or_make (name);
5213 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
5215 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
5218 *input_line_pointer = c;
5220 c = *input_line_pointer;
5223 input_line_pointer++;
5225 if (*input_line_pointer == '\n')
5231 demand_empty_rest_of_line ();
5234 /* .mem.offset offset, base
5235 "base" is used to distinguish between offsets from a different base. */
5238 dot_mem_offset (dummy)
5239 int dummy ATTRIBUTE_UNUSED;
5241 md.mem_offset.hint = 1;
5242 md.mem_offset.offset = get_absolute_expression ();
5243 if (*input_line_pointer != ',')
5245 as_bad (_("Comma expected"));
5246 ignore_rest_of_line ();
5249 ++input_line_pointer;
5250 md.mem_offset.base = get_absolute_expression ();
5251 demand_empty_rest_of_line ();
5254 /* ia64-specific pseudo-ops: */
5255 const pseudo_typeS md_pseudo_table[] =
5257 { "radix", dot_radix, 0 },
5258 { "lcomm", s_lcomm_bytes, 1 },
5259 { "loc", dot_loc, 0 },
5260 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5261 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5262 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5263 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5264 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5265 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5266 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5267 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5268 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5269 { "proc", dot_proc, 0 },
5270 { "body", dot_body, 0 },
5271 { "prologue", dot_prologue, 0 },
5272 { "endp", dot_endp, 0 },
5274 { "fframe", dot_fframe, 0 },
5275 { "vframe", dot_vframe, 0 },
5276 { "vframesp", dot_vframesp, 0 },
5277 { "vframepsp", dot_vframepsp, 0 },
5278 { "save", dot_save, 0 },
5279 { "restore", dot_restore, 0 },
5280 { "restorereg", dot_restorereg, 0 },
5281 { "restorereg.p", dot_restorereg_p, 0 },
5282 { "handlerdata", dot_handlerdata, 0 },
5283 { "unwentry", dot_unwentry, 0 },
5284 { "altrp", dot_altrp, 0 },
5285 { "savesp", dot_savemem, 0 },
5286 { "savepsp", dot_savemem, 1 },
5287 { "save.g", dot_saveg, 0 },
5288 { "save.f", dot_savef, 0 },
5289 { "save.b", dot_saveb, 0 },
5290 { "save.gf", dot_savegf, 0 },
5291 { "spill", dot_spill, 0 },
5292 { "spillreg", dot_spillreg, 0 },
5293 { "spillsp", dot_spillmem, 0 },
5294 { "spillpsp", dot_spillmem, 1 },
5295 { "spillreg.p", dot_spillreg_p, 0 },
5296 { "spillsp.p", dot_spillmem_p, 0 },
5297 { "spillpsp.p", dot_spillmem_p, 1 },
5298 { "label_state", dot_label_state, 0 },
5299 { "copy_state", dot_copy_state, 0 },
5300 { "unwabi", dot_unwabi, 0 },
5301 { "personality", dot_personality, 0 },
5302 { "mii", dot_template, 0x0 },
5303 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5304 { "mlx", dot_template, 0x2 },
5305 { "mmi", dot_template, 0x4 },
5306 { "mfi", dot_template, 0x6 },
5307 { "mmf", dot_template, 0x7 },
5308 { "mib", dot_template, 0x8 },
5309 { "mbb", dot_template, 0x9 },
5310 { "bbb", dot_template, 0xb },
5311 { "mmb", dot_template, 0xc },
5312 { "mfb", dot_template, 0xe },
5313 { "align", dot_align, 0 },
5314 { "regstk", dot_regstk, 0 },
5315 { "rotr", dot_rot, DYNREG_GR },
5316 { "rotf", dot_rot, DYNREG_FR },
5317 { "rotp", dot_rot, DYNREG_PR },
5318 { "lsb", dot_byteorder, 0 },
5319 { "msb", dot_byteorder, 1 },
5320 { "psr", dot_psr, 0 },
5321 { "alias", dot_alias, 0 },
5322 { "secalias", dot_alias, 1 },
5323 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5325 { "xdata1", dot_xdata, 1 },
5326 { "xdata2", dot_xdata, 2 },
5327 { "xdata4", dot_xdata, 4 },
5328 { "xdata8", dot_xdata, 8 },
5329 { "xdata16", dot_xdata, 16 },
5330 { "xreal4", dot_xfloat_cons, 'f' },
5331 { "xreal8", dot_xfloat_cons, 'd' },
5332 { "xreal10", dot_xfloat_cons, 'x' },
5333 { "xreal16", dot_xfloat_cons, 'X' },
5334 { "xstring", dot_xstringer, 0 },
5335 { "xstringz", dot_xstringer, 1 },
5337 /* unaligned versions: */
5338 { "xdata2.ua", dot_xdata_ua, 2 },
5339 { "xdata4.ua", dot_xdata_ua, 4 },
5340 { "xdata8.ua", dot_xdata_ua, 8 },
5341 { "xdata16.ua", dot_xdata_ua, 16 },
5342 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5343 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5344 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5345 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5347 /* annotations/DV checking support */
5348 { "entry", dot_entry, 0 },
5349 { "mem.offset", dot_mem_offset, 0 },
5350 { "pred.rel", dot_pred_rel, 0 },
5351 { "pred.rel.clear", dot_pred_rel, 'c' },
5352 { "pred.rel.imply", dot_pred_rel, 'i' },
5353 { "pred.rel.mutex", dot_pred_rel, 'm' },
5354 { "pred.safe_across_calls", dot_pred_rel, 's' },
5355 { "reg.val", dot_reg_val, 0 },
5356 { "serialize.data", dot_serialize, 0 },
5357 { "serialize.instruction", dot_serialize, 1 },
5358 { "auto", dot_dv_mode, 'a' },
5359 { "explicit", dot_dv_mode, 'e' },
5360 { "default", dot_dv_mode, 'd' },
5362 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5363 IA-64 aligns data allocation pseudo-ops by default, so we have to
5364 tell it that these ones are supposed to be unaligned. Long term,
5365 should rewrite so that only IA-64 specific data allocation pseudo-ops
5366 are aligned by default. */
5367 {"2byte", stmt_cons_ua, 2},
5368 {"4byte", stmt_cons_ua, 4},
5369 {"8byte", stmt_cons_ua, 8},
5374 static const struct pseudo_opcode
5377 void (*handler) (int);
5382 /* these are more like pseudo-ops, but don't start with a dot */
5383 { "data1", cons, 1 },
5384 { "data2", cons, 2 },
5385 { "data4", cons, 4 },
5386 { "data8", cons, 8 },
5387 { "data16", cons, 16 },
5388 { "real4", stmt_float_cons, 'f' },
5389 { "real8", stmt_float_cons, 'd' },
5390 { "real10", stmt_float_cons, 'x' },
5391 { "real16", stmt_float_cons, 'X' },
5392 { "string", stringer, 0 },
5393 { "stringz", stringer, 1 },
5395 /* unaligned versions: */
5396 { "data2.ua", stmt_cons_ua, 2 },
5397 { "data4.ua", stmt_cons_ua, 4 },
5398 { "data8.ua", stmt_cons_ua, 8 },
5399 { "data16.ua", stmt_cons_ua, 16 },
5400 { "real4.ua", float_cons, 'f' },
5401 { "real8.ua", float_cons, 'd' },
5402 { "real10.ua", float_cons, 'x' },
5403 { "real16.ua", float_cons, 'X' },
5406 /* Declare a register by creating a symbol for it and entering it in
5407 the symbol table. */
5410 declare_register (name, regnum)
5417 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
5419 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
5421 as_fatal ("Inserting \"%s\" into register table failed: %s",
5428 declare_register_set (prefix, num_regs, base_regnum)
5436 for (i = 0; i < num_regs; ++i)
5438 sprintf (name, "%s%u", prefix, i);
5439 declare_register (name, base_regnum + i);
5444 operand_width (opnd)
5445 enum ia64_opnd opnd;
5447 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5448 unsigned int bits = 0;
5452 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5453 bits += odesc->field[i].bits;
5458 static enum operand_match_result
5459 operand_match (idesc, index, e)
5460 const struct ia64_opcode *idesc;
5464 enum ia64_opnd opnd = idesc->operands[index];
5465 int bits, relocatable = 0;
5466 struct insn_fix *fix;
5473 case IA64_OPND_AR_CCV:
5474 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5475 return OPERAND_MATCH;
5478 case IA64_OPND_AR_CSD:
5479 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5480 return OPERAND_MATCH;
5483 case IA64_OPND_AR_PFS:
5484 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5485 return OPERAND_MATCH;
5489 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5490 return OPERAND_MATCH;
5494 if (e->X_op == O_register && e->X_add_number == REG_IP)
5495 return OPERAND_MATCH;
5499 if (e->X_op == O_register && e->X_add_number == REG_PR)
5500 return OPERAND_MATCH;
5503 case IA64_OPND_PR_ROT:
5504 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5505 return OPERAND_MATCH;
5509 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5510 return OPERAND_MATCH;
5513 case IA64_OPND_PSR_L:
5514 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5515 return OPERAND_MATCH;
5518 case IA64_OPND_PSR_UM:
5519 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5520 return OPERAND_MATCH;
5524 if (e->X_op == O_constant)
5526 if (e->X_add_number == 1)
5527 return OPERAND_MATCH;
5529 return OPERAND_OUT_OF_RANGE;
5534 if (e->X_op == O_constant)
5536 if (e->X_add_number == 8)
5537 return OPERAND_MATCH;
5539 return OPERAND_OUT_OF_RANGE;
5544 if (e->X_op == O_constant)
5546 if (e->X_add_number == 16)
5547 return OPERAND_MATCH;
5549 return OPERAND_OUT_OF_RANGE;
5553 /* register operands: */
5556 if (e->X_op == O_register && e->X_add_number >= REG_AR
5557 && e->X_add_number < REG_AR + 128)
5558 return OPERAND_MATCH;
5563 if (e->X_op == O_register && e->X_add_number >= REG_BR
5564 && e->X_add_number < REG_BR + 8)
5565 return OPERAND_MATCH;
5569 if (e->X_op == O_register && e->X_add_number >= REG_CR
5570 && e->X_add_number < REG_CR + 128)
5571 return OPERAND_MATCH;
5578 if (e->X_op == O_register && e->X_add_number >= REG_FR
5579 && e->X_add_number < REG_FR + 128)
5580 return OPERAND_MATCH;
5585 if (e->X_op == O_register && e->X_add_number >= REG_P
5586 && e->X_add_number < REG_P + 64)
5587 return OPERAND_MATCH;
5593 if (e->X_op == O_register && e->X_add_number >= REG_GR
5594 && e->X_add_number < REG_GR + 128)
5595 return OPERAND_MATCH;
5598 case IA64_OPND_R3_2:
5599 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5601 if (e->X_add_number < REG_GR + 4)
5602 return OPERAND_MATCH;
5603 else if (e->X_add_number < REG_GR + 128)
5604 return OPERAND_OUT_OF_RANGE;
5608 /* indirect operands: */
5609 case IA64_OPND_CPUID_R3:
5610 case IA64_OPND_DBR_R3:
5611 case IA64_OPND_DTR_R3:
5612 case IA64_OPND_ITR_R3:
5613 case IA64_OPND_IBR_R3:
5614 case IA64_OPND_MSR_R3:
5615 case IA64_OPND_PKR_R3:
5616 case IA64_OPND_PMC_R3:
5617 case IA64_OPND_PMD_R3:
5618 case IA64_OPND_RR_R3:
5619 if (e->X_op == O_index && e->X_op_symbol
5620 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5621 == opnd - IA64_OPND_CPUID_R3))
5622 return OPERAND_MATCH;
5626 if (e->X_op == O_index && !e->X_op_symbol)
5627 return OPERAND_MATCH;
5630 /* immediate operands: */
5631 case IA64_OPND_CNT2a:
5632 case IA64_OPND_LEN4:
5633 case IA64_OPND_LEN6:
5634 bits = operand_width (idesc->operands[index]);
5635 if (e->X_op == O_constant)
5637 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5638 return OPERAND_MATCH;
5640 return OPERAND_OUT_OF_RANGE;
5644 case IA64_OPND_CNT2b:
5645 if (e->X_op == O_constant)
5647 if ((bfd_vma) (e->X_add_number - 1) < 3)
5648 return OPERAND_MATCH;
5650 return OPERAND_OUT_OF_RANGE;
5654 case IA64_OPND_CNT2c:
5655 val = e->X_add_number;
5656 if (e->X_op == O_constant)
5658 if ((val == 0 || val == 7 || val == 15 || val == 16))
5659 return OPERAND_MATCH;
5661 return OPERAND_OUT_OF_RANGE;
5666 /* SOR must be an integer multiple of 8 */
5667 if (e->X_op == O_constant && e->X_add_number & 0x7)
5668 return OPERAND_OUT_OF_RANGE;
5671 if (e->X_op == O_constant)
5673 if ((bfd_vma) e->X_add_number <= 96)
5674 return OPERAND_MATCH;
5676 return OPERAND_OUT_OF_RANGE;
5680 case IA64_OPND_IMMU62:
5681 if (e->X_op == O_constant)
5683 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5684 return OPERAND_MATCH;
5686 return OPERAND_OUT_OF_RANGE;
5690 /* FIXME -- need 62-bit relocation type */
5691 as_bad (_("62-bit relocation not yet implemented"));
5695 case IA64_OPND_IMMU64:
5696 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5697 || e->X_op == O_subtract)
5699 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5700 fix->code = BFD_RELOC_IA64_IMM64;
5701 if (e->X_op != O_subtract)
5703 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5704 if (e->X_op == O_pseudo_fixup)
5708 fix->opnd = idesc->operands[index];
5711 ++CURR_SLOT.num_fixups;
5712 return OPERAND_MATCH;
5714 else if (e->X_op == O_constant)
5715 return OPERAND_MATCH;
5718 case IA64_OPND_CCNT5:
5719 case IA64_OPND_CNT5:
5720 case IA64_OPND_CNT6:
5721 case IA64_OPND_CPOS6a:
5722 case IA64_OPND_CPOS6b:
5723 case IA64_OPND_CPOS6c:
5724 case IA64_OPND_IMMU2:
5725 case IA64_OPND_IMMU7a:
5726 case IA64_OPND_IMMU7b:
5727 case IA64_OPND_IMMU21:
5728 case IA64_OPND_IMMU24:
5729 case IA64_OPND_MBTYPE4:
5730 case IA64_OPND_MHTYPE8:
5731 case IA64_OPND_POS6:
5732 bits = operand_width (idesc->operands[index]);
5733 if (e->X_op == O_constant)
5735 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5736 return OPERAND_MATCH;
5738 return OPERAND_OUT_OF_RANGE;
5742 case IA64_OPND_IMMU9:
5743 bits = operand_width (idesc->operands[index]);
5744 if (e->X_op == O_constant)
5746 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5748 int lobits = e->X_add_number & 0x3;
5749 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5750 e->X_add_number |= (bfd_vma) 0x3;
5751 return OPERAND_MATCH;
5754 return OPERAND_OUT_OF_RANGE;
5758 case IA64_OPND_IMM44:
5759 /* The least significant 16 bits must be zero.  */
5760 if ((e->X_add_number & 0xffff) != 0)
5761 /* XXX technically, this is wrong: we should not be issuing warning
5762 messages until we're sure this instruction pattern is going to
5764 as_warn (_("lower 16 bits of mask ignored"));
5766 if (e->X_op == O_constant)
5768 if (((e->X_add_number >= 0
5769 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5770 || (e->X_add_number < 0
5771 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5774 if (e->X_add_number >= 0
5775 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5777 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5779 return OPERAND_MATCH;
5782 return OPERAND_OUT_OF_RANGE;
5786 case IA64_OPND_IMM17:
5787 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5788 if (e->X_op == O_constant)
5790 if (((e->X_add_number >= 0
5791 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5792 || (e->X_add_number < 0
5793 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5796 if (e->X_add_number >= 0
5797 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5799 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5801 return OPERAND_MATCH;
5804 return OPERAND_OUT_OF_RANGE;
5808 case IA64_OPND_IMM14:
5809 case IA64_OPND_IMM22:
5811 case IA64_OPND_IMM1:
5812 case IA64_OPND_IMM8:
5813 case IA64_OPND_IMM8U4:
5814 case IA64_OPND_IMM8M1:
5815 case IA64_OPND_IMM8M1U4:
5816 case IA64_OPND_IMM8M1U8:
5817 case IA64_OPND_IMM9a:
5818 case IA64_OPND_IMM9b:
5819 bits = operand_width (idesc->operands[index]);
5820 if (relocatable && (e->X_op == O_symbol
5821 || e->X_op == O_subtract
5822 || e->X_op == O_pseudo_fixup))
5824 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5826 if (idesc->operands[index] == IA64_OPND_IMM14)
5827 fix->code = BFD_RELOC_IA64_IMM14;
5829 fix->code = BFD_RELOC_IA64_IMM22;
5831 if (e->X_op != O_subtract)
5833 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5834 if (e->X_op == O_pseudo_fixup)
5838 fix->opnd = idesc->operands[index];
5841 ++CURR_SLOT.num_fixups;
5842 return OPERAND_MATCH;
5844 else if (e->X_op != O_constant
5845 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5846 return OPERAND_MISMATCH;
5848 if (opnd == IA64_OPND_IMM8M1U4)
5850 /* Zero is not valid for unsigned compares that take an adjusted
5851 constant immediate range. */
5852 if (e->X_add_number == 0)
5853 return OPERAND_OUT_OF_RANGE;
5855 /* Sign-extend 32-bit unsigned numbers, so that the following range
5856 checks will work. */
5857 val = e->X_add_number;
5858 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5859 && ((val & ((bfd_vma) 1 << 31)) != 0))
5860 val = ((val << 32) >> 32);
5862 /* Check for 0x100000000. This is valid because
5863 0x100000000-1 is the same as ((uint32_t) -1). */
5864 if (val == ((bfd_signed_vma) 1 << 32))
5865 return OPERAND_MATCH;
5869 else if (opnd == IA64_OPND_IMM8M1U8)
5871 /* Zero is not valid for unsigned compares that take an adjusted
5872 constant immediate range. */
5873 if (e->X_add_number == 0)
5874 return OPERAND_OUT_OF_RANGE;
5876 /* Check for 0x10000000000000000. */
5877 if (e->X_op == O_big)
5879 if (generic_bignum[0] == 0
5880 && generic_bignum[1] == 0
5881 && generic_bignum[2] == 0
5882 && generic_bignum[3] == 0
5883 && generic_bignum[4] == 1)
5884 return OPERAND_MATCH;
5886 return OPERAND_OUT_OF_RANGE;
5889 val = e->X_add_number - 1;
5891 else if (opnd == IA64_OPND_IMM8M1)
5892 val = e->X_add_number - 1;
5893 else if (opnd == IA64_OPND_IMM8U4)
5895 /* Sign-extend 32-bit unsigned numbers, so that the following range
5896 checks will work. */
5897 val = e->X_add_number;
5898 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5899 && ((val & ((bfd_vma) 1 << 31)) != 0))
5900 val = ((val << 32) >> 32);
5903 val = e->X_add_number;
5905 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5906 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5907 return OPERAND_MATCH;
5909 return OPERAND_OUT_OF_RANGE;
5911 case IA64_OPND_INC3:
5912 /* +/- 1, 4, 8, 16 */
5913 val = e->X_add_number;
5916 if (e->X_op == O_constant)
5918 if ((val == 1 || val == 4 || val == 8 || val == 16))
5919 return OPERAND_MATCH;
5921 return OPERAND_OUT_OF_RANGE;
5925 case IA64_OPND_TGT25:
5926 case IA64_OPND_TGT25b:
5927 case IA64_OPND_TGT25c:
5928 case IA64_OPND_TGT64:
5929 if (e->X_op == O_symbol)
5931 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5932 if (opnd == IA64_OPND_TGT25)
5933 fix->code = BFD_RELOC_IA64_PCREL21F;
5934 else if (opnd == IA64_OPND_TGT25b)
5935 fix->code = BFD_RELOC_IA64_PCREL21M;
5936 else if (opnd == IA64_OPND_TGT25c)
5937 fix->code = BFD_RELOC_IA64_PCREL21B;
5938 else if (opnd == IA64_OPND_TGT64)
5939 fix->code = BFD_RELOC_IA64_PCREL60B;
5943 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5944 fix->opnd = idesc->operands[index];
5947 ++CURR_SLOT.num_fixups;
5948 return OPERAND_MATCH;
5950 case IA64_OPND_TAG13:
5951 case IA64_OPND_TAG13b:
5955 return OPERAND_MATCH;
5958 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5959 /* There are no external relocs for TAG13/TAG13b fields, so we
5960 create a dummy reloc. This will not live past md_apply_fix3. */
5961 fix->code = BFD_RELOC_UNUSED;
5962 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5963 fix->opnd = idesc->operands[index];
5966 ++CURR_SLOT.num_fixups;
5967 return OPERAND_MATCH;
5974 case IA64_OPND_LDXMOV:
5975 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5976 fix->code = BFD_RELOC_IA64_LDXMOV;
5977 fix->opnd = idesc->operands[index];
5980 ++CURR_SLOT.num_fixups;
5981 return OPERAND_MATCH;
5986 return OPERAND_MISMATCH;
5995 memset (e, 0, sizeof (*e));
5998 if (*input_line_pointer != '}')
6000 sep = *input_line_pointer++;
6004 if (!md.manual_bundling)
6005 as_warn ("Found '}' when manual bundling is off");
6007 CURR_SLOT.manual_bundling_off = 1;
6008 md.manual_bundling = 0;
6014 /* Returns the next entry in the opcode table that matches the one in
6015 IDESC, and frees the entry in IDESC. If no matching entry is
6016 found, NULL is returned instead. */
6018 static struct ia64_opcode *
6019 get_next_opcode (struct ia64_opcode *idesc)
6021 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6022 ia64_free_opcode (idesc);
6026 /* Parse the operands for the opcode and find the opcode variant that
6027 matches the specified operands, or NULL if no match is possible. */
6029 static struct ia64_opcode *
6030 parse_operands (idesc)
6031 struct ia64_opcode *idesc;
6033 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6034 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6037 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6038 enum operand_match_result result;
6040 char *first_arg = 0, *end, *saved_input_pointer;
6043 assert (strlen (idesc->name) <= 128);
6045 strcpy (mnemonic, idesc->name);
6046 if (idesc->operands[2] == IA64_OPND_SOF
6047 || idesc->operands[1] == IA64_OPND_SOF)
6049 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6050 can't parse the first operand until we have parsed the
6051 remaining operands of the "alloc" instruction. */
6053 first_arg = input_line_pointer;
6054 end = strchr (input_line_pointer, '=');
6057 as_bad ("Expected separator `='");
6060 input_line_pointer = end + 1;
6067 if (i < NELEMS (CURR_SLOT.opnd))
6069 sep = parse_operand (CURR_SLOT.opnd + i);
6070 if (CURR_SLOT.opnd[i].X_op == O_absent)
6077 sep = parse_operand (&dummy);
6078 if (dummy.X_op == O_absent)
6084 if (sep != '=' && sep != ',')
6089 if (num_outputs > 0)
6090 as_bad ("Duplicate equal sign (=) in instruction");
6092 num_outputs = i + 1;
6097 as_bad ("Illegal operand separator `%c'", sep);
6101 if (idesc->operands[2] == IA64_OPND_SOF
6102 || idesc->operands[1] == IA64_OPND_SOF)
6104 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
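/* Illustrative example: "alloc r32=ar.pfs,2,3,4,0" is rewritten as
   "alloc r32=ar.pfs,9,5,0" -- sof = 2+3+4, sol = 2+3, and the rotating
   region size is carried over unchanged.  */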
6105 know (strcmp (idesc->name, "alloc") == 0);
6106 i = (CURR_SLOT.opnd[1].X_op == O_register
6107 && CURR_SLOT.opnd[1].X_add_number == REG_AR + AR_PFS) ? 2 : 1;
6108 if (num_operands == i + 3 /* first_arg not included in this count! */
6109 && CURR_SLOT.opnd[i].X_op == O_constant
6110 && CURR_SLOT.opnd[i + 1].X_op == O_constant
6111 && CURR_SLOT.opnd[i + 2].X_op == O_constant
6112 && CURR_SLOT.opnd[i + 3].X_op == O_constant)
6114 sof = set_regstack (CURR_SLOT.opnd[i].X_add_number,
6115 CURR_SLOT.opnd[i + 1].X_add_number,
6116 CURR_SLOT.opnd[i + 2].X_add_number,
6117 CURR_SLOT.opnd[i + 3].X_add_number);
6119 /* now we can parse the first arg: */
6120 saved_input_pointer = input_line_pointer;
6121 input_line_pointer = first_arg;
6122 sep = parse_operand (CURR_SLOT.opnd + 0);
6124 --num_outputs; /* force error */
6125 input_line_pointer = saved_input_pointer;
6127 CURR_SLOT.opnd[i].X_add_number = sof;
6128 CURR_SLOT.opnd[i + 1].X_add_number
6129 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6130 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6134 highest_unmatched_operand = -4;
6135 curr_out_of_range_pos = -1;
6137 for (; idesc; idesc = get_next_opcode (idesc))
6139 if (num_outputs != idesc->num_outputs)
6140 continue; /* mismatch in # of outputs */
6141 if (highest_unmatched_operand < 0)
6142 highest_unmatched_operand |= 1;
6143 if (num_operands > NELEMS (idesc->operands)
6144 || (num_operands < NELEMS (idesc->operands)
6145 && idesc->operands[num_operands])
6146 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6147 continue; /* mismatch in number of arguments */
6148 if (highest_unmatched_operand < 0)
6149 highest_unmatched_operand |= 2;
6151 CURR_SLOT.num_fixups = 0;
6153 /* Try to match all operands. If we see an out-of-range operand,
6154 then continue trying to match the rest of the operands, since if
6155 the rest match, then this idesc will give the best error message. */
6157 out_of_range_pos = -1;
6158 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6160 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6161 if (result != OPERAND_MATCH)
6163 if (result != OPERAND_OUT_OF_RANGE)
6165 if (out_of_range_pos < 0)
6166 /* remember position of the first out-of-range operand: */
6167 out_of_range_pos = i;
6171 /* If we did not match all operands, or if at least one operand was
6172 out-of-range, then this idesc does not match. Keep track of which
6173 idesc matched the most operands before failing. If we have two
6174 idescs that failed at the same position, and one had an out-of-range
6175 operand, then prefer the out-of-range operand. Thus if we have
6176 "add r0=0x1000000,r1" we get an error saying the constant is out
6177 of range instead of an error saying that the constant should have been
6180 if (i != num_operands || out_of_range_pos >= 0)
6182 if (i > highest_unmatched_operand
6183 || (i == highest_unmatched_operand
6184 && out_of_range_pos > curr_out_of_range_pos))
6186 highest_unmatched_operand = i;
6187 if (out_of_range_pos >= 0)
6189 expected_operand = idesc->operands[out_of_range_pos];
6190 error_pos = out_of_range_pos;
6194 expected_operand = idesc->operands[i];
6197 curr_out_of_range_pos = out_of_range_pos;
6206 if (expected_operand)
6207 as_bad ("Operand %u of `%s' should be %s",
6208 error_pos + 1, mnemonic,
6209 elf64_ia64_operands[expected_operand].desc);
6210 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6211 as_bad ("Wrong number of output operands");
6212 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6213 as_bad ("Wrong number of input operands");
6215 as_bad ("Operand mismatch");
6219 /* Check that the instruction doesn't use
6220 - r0, f0, or f1 as output operands
6221 - the same predicate twice as output operands
6222 - r0 as address of a base update load or store
6223 - the same GR as output and address of a base update load
6224 - two even- or two odd-numbered FRs as output operands of a floating
6225 point parallel load.
6226 At most two (conflicting) output (or output-like) operands can exist,
6227 (floating point parallel loads have three outputs, but the base register,
6228 if updated, cannot conflict with the actual outputs). */
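/* For example, "ld8 r0=[r3]" (r0 used as a target) or "ldfps f2,f4=[r5]"
   (two even-numbered FRs written by a parallel load) would be flagged by
   the checks below.  */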
6230 for (i = 0; i < num_operands; ++i)
6235 switch (idesc->operands[i])
6240 if (i < num_outputs)
6242 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6245 reg1 = CURR_SLOT.opnd[i].X_add_number;
6247 reg2 = CURR_SLOT.opnd[i].X_add_number;
6252 if (i < num_outputs)
6255 reg1 = CURR_SLOT.opnd[i].X_add_number;
6257 reg2 = CURR_SLOT.opnd[i].X_add_number;
6264 if (i < num_outputs)
6266 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6267 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6270 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6273 reg1 = CURR_SLOT.opnd[i].X_add_number;
6275 reg2 = CURR_SLOT.opnd[i].X_add_number;
6279 if (idesc->flags & IA64_OPCODE_POSTINC)
6281 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6284 reg1 = CURR_SLOT.opnd[i].X_add_number;
6286 reg2 = CURR_SLOT.opnd[i].X_add_number;
6297 as_warn ("Invalid use of `%c%d' as output operand", reg_class, regno);
6300 as_warn ("Invalid use of `r%d' as base update address operand", regno);
6306 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6311 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6316 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6324 as_warn ("Invalid duplicate use of `%c%d'", reg_class, reg1);
6326 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6327 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6328 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6329 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6330 && ! ((reg1 ^ reg2) & 1))
6331 as_warn ("Invalid simultaneous use of `f%d' and `f%d'",
6332 reg1 - REG_FR, reg2 - REG_FR);
6333 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6334 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6335 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6336 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6337 as_warn ("Dangerous simultaneous use of `f%d' and `f%d'",
6338 reg1 - REG_FR, reg2 - REG_FR);
6343 build_insn (slot, insnp)
6347 const struct ia64_operand *odesc, *o2desc;
6348 struct ia64_opcode *idesc = slot->idesc;
6354 insn = idesc->opcode | slot->qp_regno;
6356 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6358 if (slot->opnd[i].X_op == O_register
6359 || slot->opnd[i].X_op == O_constant
6360 || slot->opnd[i].X_op == O_index)
6361 val = slot->opnd[i].X_add_number;
6362 else if (slot->opnd[i].X_op == O_big)
6364 /* This must be the value 0x10000000000000000. */
6365 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6371 switch (idesc->operands[i])
6373 case IA64_OPND_IMMU64:
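/* movl: bits 22..62 of the 64-bit immediate fill the L slot (*insnp),
   which precedes the X-unit instruction word; the remaining bits
   (0..21 and 63) are scattered into the immediate fields of the
   instruction word itself.  */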
6374 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6375 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6376 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6377 | (((val >> 63) & 0x1) << 36));
6380 case IA64_OPND_IMMU62:
6381 val &= 0x3fffffffffffffffULL;
6382 if (val != slot->opnd[i].X_add_number)
6383 as_warn (_("Value truncated to 62 bits"));
6384 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6385 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6388 case IA64_OPND_TGT64:
6390 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6391 insn |= ((((val >> 59) & 0x1) << 36)
6392 | (((val >> 0) & 0xfffff) << 13));
6423 case IA64_OPND_R3_2:
6424 case IA64_OPND_CPUID_R3:
6425 case IA64_OPND_DBR_R3:
6426 case IA64_OPND_DTR_R3:
6427 case IA64_OPND_ITR_R3:
6428 case IA64_OPND_IBR_R3:
6430 case IA64_OPND_MSR_R3:
6431 case IA64_OPND_PKR_R3:
6432 case IA64_OPND_PMC_R3:
6433 case IA64_OPND_PMD_R3:
6434 case IA64_OPND_RR_R3:
6442 odesc = elf64_ia64_operands + idesc->operands[i];
6443 err = (*odesc->insert) (odesc, val, &insn);
6445 as_bad_where (slot->src_file, slot->src_line,
6446 "Bad operand value: %s", err);
6447 if (idesc->flags & IA64_OPCODE_PSEUDO)
6449 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6450 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6452 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6453 (*o2desc->insert) (o2desc, val, &insn);
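/* For pseudo-ops such as "shl", the deposit length is implicit; it is
   derived below as 64 minus the count/position operand.  */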
6455 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6456 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6457 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6459 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6460 (*o2desc->insert) (o2desc, 64 - val, &insn);
6470 int manual_bundling_off = 0, manual_bundling = 0;
6471 enum ia64_unit required_unit, insn_unit = 0;
6472 enum ia64_insn_type type[3], insn_type;
6473 unsigned int template, orig_template;
6474 bfd_vma insn[3] = { -1, -1, -1 };
6475 struct ia64_opcode *idesc;
6476 int end_of_insn_group = 0, user_template = -1;
6477 int n, i, j, first, curr, last_slot;
6478 unw_rec_list *ptr, *last_ptr, *end_ptr;
6479 bfd_vma t0 = 0, t1 = 0;
6480 struct label_fix *lfix;
6481 struct insn_fix *ifix;
6487 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6488 know (first >= 0 && first < NUM_SLOTS);
6489 n = MIN (3, md.num_slots_in_use);
6491 /* Determine template: use user_template if specified, best match
6494 if (md.slot[first].user_template >= 0)
6495 user_template = template = md.slot[first].user_template;
6498 /* Auto select appropriate template. */
6499 memset (type, 0, sizeof (type));
6501 for (i = 0; i < n; ++i)
6503 if (md.slot[curr].label_fixups && i != 0)
6505 type[i] = md.slot[curr].idesc->type;
6506 curr = (curr + 1) % NUM_SLOTS;
6508 template = best_template[type[0]][type[1]][type[2]];
6511 /* initialize instructions with appropriate nops: */
6512 for (i = 0; i < 3; ++i)
6513 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
6517 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6518 from the start of the frag. */
6519 addr_mod = frag_now_fix () & 15;
6520 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6521 as_bad (_("instruction address is not a multiple of 16"));
6522 frag_now->insn_addr = addr_mod;
6523 frag_now->has_code = 1;
6525 /* now fill in slots with as many insns as possible: */
6527 idesc = md.slot[curr].idesc;
6528 end_of_insn_group = 0;
6530 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6532 /* If we have unwind records, we may need to update some now. */
6533 ptr = md.slot[curr].unwind_record;
6536 /* Find the last prologue/body record in the list for the current
6537 insn, and set the slot number for all records up to that point.
6538 This needs to be done now, because prologue/body records refer to
6539 the current point, not the point after the instruction has been
6540 issued. This matters because there may have been nops emitted
6541 meanwhile. Any non-prologue non-body record followed by a
6542 prologue/body record must also refer to the current point. */
6544 end_ptr = md.slot[(curr + 1) % NUM_SLOTS].unwind_record;
6545 for (; ptr != end_ptr; ptr = ptr->next)
6546 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6547 || ptr->r.type == body)
6551 /* Make last_ptr point one after the last prologue/body
6553 last_ptr = last_ptr->next;
6554 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6557 ptr->slot_number = (unsigned long) f + i;
6558 ptr->slot_frag = frag_now;
6560 /* Remove the initialized records, so that we won't accidentally
6561 update them again if we insert a nop and continue. */
6562 md.slot[curr].unwind_record = last_ptr;
6566 manual_bundling_off = md.slot[curr].manual_bundling_off;
6567 if (md.slot[curr].manual_bundling_on)
6570 manual_bundling = 1;
6572 break; /* Need to start a new bundle. */
6575 /* If this instruction specifies a template, then it must be the first
6576 instruction of a bundle. */
6577 if (curr != first && md.slot[curr].user_template >= 0)
6580 if (idesc->flags & IA64_OPCODE_SLOT2)
6582 if (manual_bundling && !manual_bundling_off)
6584 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6585 "`%s' must be last in bundle", idesc->name);
6587 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6591 if (idesc->flags & IA64_OPCODE_LAST)
6594 unsigned int required_template;
6596 /* If we need a stop bit after an M slot, our only choice is
6597 template 5 (M;;MI). If we need a stop bit after a B
6598 slot, our only choice is to place it at the end of the
6599 bundle, because the only available templates are MIB,
6600 MBB, BBB, MMB, and MFB. We don't handle anything other
6601 than M and B slots because these are the only kind of
6602 instructions that can have the IA64_OPCODE_LAST bit set. */
6603 required_template = template;
6604 switch (idesc->type)
6608 required_template = 5;
6616 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6617 "Internal error: don't know how to force %s to end"
6618 "of instruction group", idesc->name);
6623 && (i > required_slot
6624 || (required_slot == 2 && !manual_bundling_off)
6625 || (user_template >= 0
6626 /* Changing from MMI to M;MI is OK. */
6627 && (template ^ required_template) > 1)))
6629 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6630 "`%s' must be last in instruction group",
6632 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6633 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6635 if (required_slot < i)
6636 /* Can't fit this instruction. */
6640 if (required_template != template)
6642 /* If we switch the template, we need to reset the NOPs
6643 after slot i. The slot-types of the instructions ahead
6644 of i never change, so we don't need to worry about
6645 changing NOPs in front of this slot. */
6646 for (j = i; j < 3; ++j)
6647 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6649 template = required_template;
6651 if (curr != first && md.slot[curr].label_fixups)
6653 if (manual_bundling)
6655 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6656 "Label must be first in a bundle");
6657 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6659 /* This insn must go into the first slot of a bundle. */
6663 if (end_of_insn_group && md.num_slots_in_use >= 1)
6665 /* We need an instruction group boundary in the middle of a
6666 bundle. See if we can switch to another template with
6667 an appropriate boundary. */
6669 orig_template = template;
6670 if (i == 1 && (user_template == 4
6671 || (user_template < 0
6672 && (ia64_templ_desc[template].exec_unit[0]
6676 end_of_insn_group = 0;
6678 else if (i == 2 && (user_template == 0
6679 || (user_template < 0
6680 && (ia64_templ_desc[template].exec_unit[1]
6682 /* This test makes sure we don't switch the template if
6683 the next instruction is one that needs to be first in
6684 an instruction group. Since all those instructions are
6685 in the M group, there is no way such an instruction can
6686 fit in this bundle even if we switch the template. The
6687 reason we have to check for this is that otherwise we
6688 may end up generating "MI;;I M.." which has the deadly
6689 effect that the second M instruction is no longer the
6690 first in the group! --davidm 99/12/16 */
6691 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6694 end_of_insn_group = 0;
6697 && user_template == 0
6698 && !(idesc->flags & IA64_OPCODE_FIRST))
6699 /* Use the next slot. */
6701 else if (curr != first)
6702 /* can't fit this insn */
6705 if (template != orig_template)
6706 /* if we switch the template, we need to reset the NOPs
6707 after slot i. The slot-types of the instructions ahead
6708 of i never change, so we don't need to worry about
6709 changing NOPs in front of this slot. */
6710 for (j = i; j < 3; ++j)
6711 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
6713 required_unit = ia64_templ_desc[template].exec_unit[i];
6715 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6716 if (idesc->type == IA64_TYPE_DYN)
6718 enum ia64_opnd opnd1, opnd2;
6720 if ((strcmp (idesc->name, "nop") == 0)
6721 || (strcmp (idesc->name, "break") == 0))
6722 insn_unit = required_unit;
6723 else if (strcmp (idesc->name, "hint") == 0)
6725 insn_unit = required_unit;
6726 if (required_unit == IA64_UNIT_B)
6732 case hint_b_warning:
6733 as_warn ("hint in B unit may be treated as nop");
6736 /* When manual bundling is off and there is no
6737 user template, we choose a different unit so
6738 that hint won't go into the current slot. We
6739 will fill the current bundle with nops and
6740 try to put hint into the next bundle. */
6741 if (!manual_bundling && user_template < 0)
6742 insn_unit = IA64_UNIT_I;
6744 as_bad ("hint in B unit can't be used");
6749 else if (strcmp (idesc->name, "chk.s") == 0
6750 || strcmp (idesc->name, "mov") == 0)
6752 insn_unit = IA64_UNIT_M;
6753 if (required_unit == IA64_UNIT_I
6754 || (required_unit == IA64_UNIT_F && template == 6))
6755 insn_unit = IA64_UNIT_I;
6758 as_fatal ("emit_one_bundle: unexpected dynamic op");
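/* Append the unit suffix and re-look up the opcode; e.g. a bare "nop"
   headed for an M slot is looked up again as "nop.m".  */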
6760 sprintf (mnemonic, "%s.%c", idesc->name, "?imbfxx"[insn_unit]);
6761 opnd1 = idesc->operands[0];
6762 opnd2 = idesc->operands[1];
6763 ia64_free_opcode (idesc);
6764 idesc = ia64_find_opcode (mnemonic);
6765 /* moves to/from ARs have collisions */
6766 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6768 while (idesc != NULL
6769 && (idesc->operands[0] != opnd1
6770 || idesc->operands[1] != opnd2))
6771 idesc = get_next_opcode (idesc);
6773 md.slot[curr].idesc = idesc;
6777 insn_type = idesc->type;
6778 insn_unit = IA64_UNIT_NIL;
6782 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6783 insn_unit = required_unit;
6785 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6786 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6787 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6788 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6789 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6794 if (insn_unit != required_unit)
6795 continue; /* Try next slot. */
6797 if (debug_type == DEBUG_DWARF2 || md.slot[curr].loc_directive_seen)
6799 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6801 md.slot[curr].loc_directive_seen = 0;
6802 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6805 build_insn (md.slot + curr, insn + i);
6807 ptr = md.slot[curr].unwind_record;
6810 /* Set slot numbers for all remaining unwind records belonging to the
6811 current insn. There can not be any prologue/body unwind records
6813 end_ptr = md.slot[(curr + 1) % NUM_SLOTS].unwind_record;
6814 for (; ptr != end_ptr; ptr = ptr->next)
6816 ptr->slot_number = (unsigned long) f + i;
6817 ptr->slot_frag = frag_now;
6819 md.slot[curr].unwind_record = NULL;
6822 if (required_unit == IA64_UNIT_L)
6825 /* skip one slot for long/X-unit instructions */
6828 --md.num_slots_in_use;
6831 /* now is a good time to fix up the labels for this insn: */
6832 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6834 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6835 symbol_set_frag (lfix->sym, frag_now);
6837 /* and fix up the tags also. */
6838 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6840 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6841 symbol_set_frag (lfix->sym, frag_now);
6844 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6846 ifix = md.slot[curr].fixup + j;
6847 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6848 &ifix->expr, ifix->is_pcrel, ifix->code);
6849 fix->tc_fix_data.opnd = ifix->opnd;
6850 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
6851 fix->fx_file = md.slot[curr].src_file;
6852 fix->fx_line = md.slot[curr].src_line;
6855 end_of_insn_group = md.slot[curr].end_of_insn_group;
6858 ia64_free_opcode (md.slot[curr].idesc);
6859 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6860 md.slot[curr].user_template = -1;
6862 if (manual_bundling_off)
6864 manual_bundling = 0;
6867 curr = (curr + 1) % NUM_SLOTS;
6868 idesc = md.slot[curr].idesc;
6870 if (manual_bundling > 0)
6872 if (md.num_slots_in_use > 0)
6875 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6876 "`%s' does not fit into bundle", idesc->name);
6877 else if (last_slot < 0)
6879 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6880 "`%s' does not fit into %s template",
6881 idesc->name, ia64_templ_desc[template].name);
6882 /* Drop first insn so we don't livelock. */
6883 --md.num_slots_in_use;
6884 know (curr == first);
6885 ia64_free_opcode (md.slot[curr].idesc);
6886 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6887 md.slot[curr].user_template = -1;
6895 else if (last_slot == 0)
6896 where = "slots 2 or 3";
6899 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6900 "`%s' can't go in %s of %s template",
6901 idesc->name, where, ia64_templ_desc[template].name);
6905 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6906 "Missing '}' at end of file");
6908 know (md.num_slots_in_use < NUM_SLOTS);
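/* Pack the 128-bit bundle: bit 0 carries the trailing stop
   (end-of-insn-group) flag, bits 1..4 the template, and bits 5..45,
   46..86, and 87..127 the three 41-bit instruction slots.  t0/t1 are
   the low and high 64-bit halves, emitted little-endian below.  */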
6910 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6911 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6913 number_to_chars_littleendian (f + 0, t0, 8);
6914 number_to_chars_littleendian (f + 8, t1, 8);
6918 unwind.list->next_slot_number = (unsigned long) f + 16;
6919 unwind.list->next_slot_frag = frag_now;
6924 md_parse_option (c, arg)
6931 /* Switches from the Intel assembler. */
6933 if (strcmp (arg, "ilp64") == 0
6934 || strcmp (arg, "lp64") == 0
6935 || strcmp (arg, "p64") == 0)
6937 md.flags |= EF_IA_64_ABI64;
6939 else if (strcmp (arg, "ilp32") == 0)
6941 md.flags &= ~EF_IA_64_ABI64;
6943 else if (strcmp (arg, "le") == 0)
6945 md.flags &= ~EF_IA_64_BE;
6946 default_big_endian = 0;
6948 else if (strcmp (arg, "be") == 0)
6950 md.flags |= EF_IA_64_BE;
6951 default_big_endian = 1;
6953 else if (strncmp (arg, "unwind-check=", 13) == 0)
6956 if (strcmp (arg, "warning") == 0)
6957 md.unwind_check = unwind_check_warning;
6958 else if (strcmp (arg, "error") == 0)
6959 md.unwind_check = unwind_check_error;
6963 else if (strncmp (arg, "hint.b=", 7) == 0)
6966 if (strcmp (arg, "ok") == 0)
6967 md.hint_b = hint_b_ok;
6968 else if (strcmp (arg, "warning") == 0)
6969 md.hint_b = hint_b_warning;
6970 else if (strcmp (arg, "error") == 0)
6971 md.hint_b = hint_b_error;
6975 else if (strncmp (arg, "tune=", 5) == 0)
6978 if (strcmp (arg, "itanium1") == 0)
6980 else if (strcmp (arg, "itanium2") == 0)
6990 if (strcmp (arg, "so") == 0)
6992 /* Suppress signon message. */
6994 else if (strcmp (arg, "pi") == 0)
6996 /* Reject privileged instructions. FIXME */
6998 else if (strcmp (arg, "us") == 0)
7000 /* Allow union of signed and unsigned range. FIXME */
7002 else if (strcmp (arg, "close_fcalls") == 0)
7004 /* Do not resolve global function calls. */
7011 /* temp[="prefix"] Insert temporary labels into the object file
7012 symbol table prefixed by "prefix".
7013 Default prefix is ":temp:".
7018 /* indirect=<tgt> Assume unannotated indirect branches behavior
7019 according to <tgt> --
7020 exit: branch out from the current context (default)
7021 labels: all labels in context may be branch targets
7023 if (strncmp (arg, "indirect=", 9) != 0)
7028 /* -X conflicts with an ignored option, use -x instead */
7030 if (!arg || strcmp (arg, "explicit") == 0)
7032 /* set default mode to explicit */
7033 md.default_explicit_mode = 1;
7036 else if (strcmp (arg, "auto") == 0)
7038 md.default_explicit_mode = 0;
7040 else if (strcmp (arg, "none") == 0)
7044 else if (strcmp (arg, "debug") == 0)
7048 else if (strcmp (arg, "debugx") == 0)
7050 md.default_explicit_mode = 1;
7053 else if (strcmp (arg, "debugn") == 0)
7060 as_bad (_("Unrecognized option '-x%s'"), arg);
7065 /* nops Print nops statistics. */
7068 /* GNU specific switches for gcc. */
7069 case OPTION_MCONSTANT_GP:
7070 md.flags |= EF_IA_64_CONS_GP;
7073 case OPTION_MAUTO_PIC:
7074 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7085 md_show_usage (stream)
7090 --mconstant-gp mark output file as using the constant-GP model\n\
7091 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7092 --mauto-pic mark output file as using the constant-GP model\n\
7093 without function descriptors (sets ELF header flag\n\
7094 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7095 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7096 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7097 -mtune=[itanium1|itanium2]\n\
7098 tune for a specific CPU (default -mtune=itanium2)\n\
7099 -munwind-check=[warning|error]\n\
7100 unwind directive check (default -munwind-check=warning)\n\
7101 -mhint.b=[ok|warning|error]\n\
7102 hint.b check (default -mhint.b=error)\n\
7103 -x | -xexplicit turn on dependency violation checking\n\
7104 -xauto automagically remove dependency violations (default)\n\
7105 -xnone turn off dependency violation checking\n\
7106 -xdebug debug dependency violation checker\n\
7107 -xdebugn debug dependency violation checker but turn off\n\
7108 dependency violation checking\n\
7109 -xdebugx debug dependency violation checker and turn on\n\
7110 dependency violation checking\n"),
7115 ia64_after_parse_args ()
7117 if (debug_type == DEBUG_STABS)
7118 as_fatal (_("--gstabs is not supported for ia64"));
7121 /* Return true if TYPE fits in TEMPL at SLOT. */
7124 match (int templ, int type, int slot)
7126 enum ia64_unit unit;
7129 unit = ia64_templ_desc[templ].exec_unit[slot];
7132 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7134 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7136 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7137 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7138 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7139 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7140 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7141 default: result = 0; break;
7146 /* Add a bit of extra goodness if a nop of type F or B would fit
7147 in TEMPL at SLOT. */
7150 extra_goodness (int templ, int slot)
7155 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7157 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7163 if (match (templ, IA64_TYPE_M, slot)
7164 || match (templ, IA64_TYPE_I, slot))
7165 /* Favor M- and I-unit NOPs, and avoid F-unit and B-unit NOPs,
7166 which may cause split-issue or less-than-optimal
7167 branch-prediction. */
7178 /* This function is called once, at assembler startup time. It sets
7179 up all the tables, etc. that the MD part of the assembler will need
7180 that can be determined before arguments are parsed. */
7184 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
7189 md.explicit_mode = md.default_explicit_mode;
7191 bfd_set_section_alignment (stdoutput, text_section, 4);
7193 /* Make sure function pointers get initialized. */
7194 target_big_endian = -1;
7195 dot_byteorder (default_big_endian);
7197 alias_hash = hash_new ();
7198 alias_name_hash = hash_new ();
7199 secalias_hash = hash_new ();
7200 secalias_name_hash = hash_new ();
7202 pseudo_func[FUNC_DTP_MODULE].u.sym =
7203 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
7204 &zero_address_frag);
7206 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7207 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
7208 &zero_address_frag);
7210 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7211 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
7212 &zero_address_frag);
7214 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7215 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
7216 &zero_address_frag);
7218 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7219 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
7220 &zero_address_frag);
7222 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7223 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
7224 &zero_address_frag);
7226 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7227 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
7228 &zero_address_frag);
7230 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7231 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
7232 &zero_address_frag);
7234 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7235 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
7236 &zero_address_frag);
7238 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7239 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
7240 &zero_address_frag);
7242 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7243 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
7244 &zero_address_frag);
7246 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7247 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
7248 &zero_address_frag);
7250 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7251 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
7252 &zero_address_frag);
7254 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7255 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
7256 &zero_address_frag);
7258 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7259 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
7260 &zero_address_frag);
7262 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7263 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
7264 &zero_address_frag);
7266 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7267 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
7268 &zero_address_frag);
7270 if (md.tune != itanium1)
7272 /* Convert MFI NOP bundles into MMI NOP bundles. */
7274 le_nop_stop[0] = 0x9;
7277 /* Compute the table of best templates. We compute goodness as a
7278 base 4 value, in which each match counts for 3. Match-failures
7279 result in NOPs and we use extra_goodness() to pick the execution
7280 units that are best suited for issuing the NOP. */
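/* For example, an (M, I, I) triple scores 3+3+3 = 9 against template
   MII, while a partial match scores 3 per matched slot plus
   extra_goodness () for each slot that will have to hold a NOP.  */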
7281 for (i = 0; i < IA64_NUM_TYPES; ++i)
7282 for (j = 0; j < IA64_NUM_TYPES; ++j)
7283 for (k = 0; k < IA64_NUM_TYPES; ++k)
7286 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7289 if (match (t, i, 0))
7291 if (match (t, j, 1))
7293 if (match (t, k, 2))
7294 goodness = 3 + 3 + 3;
7296 goodness = 3 + 3 + extra_goodness (t, 2);
7298 else if (match (t, j, 2))
7299 goodness = 3 + 3 + extra_goodness (t, 1);
7303 goodness += extra_goodness (t, 1);
7304 goodness += extra_goodness (t, 2);
7307 else if (match (t, i, 1))
7309 if (match (t, j, 2))
7312 goodness = 3 + extra_goodness (t, 2);
7314 else if (match (t, i, 2))
7315 goodness = 3 + extra_goodness (t, 1);
7317 if (goodness > best)
7320 best_template[i][j][k] = t;
7325 for (i = 0; i < NUM_SLOTS; ++i)
7326 md.slot[i].user_template = -1;
7328 md.pseudo_hash = hash_new ();
7329 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7331 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7332 (void *) (pseudo_opcode + i));
7334 as_fatal ("ia64.md_begin: can't hash `%s': %s",
7335 pseudo_opcode[i].name, err);
7338 md.reg_hash = hash_new ();
7339 md.dynreg_hash = hash_new ();
7340 md.const_hash = hash_new ();
7341 md.entry_hash = hash_new ();
7343 /* general registers: */
7346 for (i = 0; i < total; ++i)
7348 sprintf (name, "r%d", i - REG_GR);
7349 md.regsym[i] = declare_register (name, i);
7352 /* floating point registers: */
7354 for (; i < total; ++i)
7356 sprintf (name, "f%d", i - REG_FR);
7357 md.regsym[i] = declare_register (name, i);
7360 /* application registers: */
7363 for (; i < total; ++i)
7365 sprintf (name, "ar%d", i - REG_AR);
7366 md.regsym[i] = declare_register (name, i);
7369 /* control registers: */
7372 for (; i < total; ++i)
7374 sprintf (name, "cr%d", i - REG_CR);
7375 md.regsym[i] = declare_register (name, i);
7378 /* predicate registers: */
7380 for (; i < total; ++i)
7382 sprintf (name, "p%d", i - REG_P);
7383 md.regsym[i] = declare_register (name, i);
7386 /* branch registers: */
7388 for (; i < total; ++i)
7390 sprintf (name, "b%d", i - REG_BR);
7391 md.regsym[i] = declare_register (name, i);
7394 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
7395 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
7396 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
7397 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
7398 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
7399 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
7400 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
7402 for (i = 0; i < NELEMS (indirect_reg); ++i)
7404 regnum = indirect_reg[i].regnum;
7405 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
7408 /* define synonyms for application registers: */
7409 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
7410 md.regsym[i] = declare_register (ar[i - REG_AR].name,
7411 REG_AR + ar[i - REG_AR].regnum);
7413 /* define synonyms for control registers: */
7414 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
7415 md.regsym[i] = declare_register (cr[i - REG_CR].name,
7416 REG_CR + cr[i - REG_CR].regnum);
7418 declare_register ("gp", REG_GR + 1);
7419 declare_register ("sp", REG_GR + 12);
7420 declare_register ("rp", REG_BR + 0);
7422 /* pseudo-registers used to specify unwind info: */
7423 declare_register ("psp", REG_PSP);
7425 declare_register_set ("ret", 4, REG_GR + 8);
7426 declare_register_set ("farg", 8, REG_FR + 8);
7427 declare_register_set ("fret", 8, REG_FR + 8);
7429 for (i = 0; i < NELEMS (const_bits); ++i)
7431 err = hash_insert (md.const_hash, const_bits[i].name,
7432 (PTR) (const_bits + i));
7434 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
7438 /* Set the architecture and machine depending on defaults and command line
7440 if (md.flags & EF_IA_64_ABI64)
7441 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7443 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7446 as_warn (_("Could not set architecture and machine"));
7448 /* Set the pointer size and pointer shift size depending on md.flags */
7450 if (md.flags & EF_IA_64_ABI64)
7452 md.pointer_size = 8; /* pointers are 8 bytes */
7453 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7457 md.pointer_size = 4; /* pointers are 4 bytes */
7458 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7461 md.mem_offset.hint = 0;
7464 md.entry_labels = NULL;
7467 /* Set the default options in md. Cannot do this in md_begin because
7468 that is called after md_parse_option which is where we set the
7469 options in md based on command line options. */
7472 ia64_init (argc, argv)
7473 int argc ATTRIBUTE_UNUSED;
7474 char **argv ATTRIBUTE_UNUSED;
7476 md.flags = MD_FLAGS_DEFAULT;
7478 /* FIXME: We should change it to unwind_check_error someday. */
7479 md.unwind_check = unwind_check_warning;
7480 md.hint_b = hint_b_error;
7484 /* Return a string for the target object file format. */
7487 ia64_target_format ()
7489 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7491 if (md.flags & EF_IA_64_BE)
7493 if (md.flags & EF_IA_64_ABI64)
7494 #if defined(TE_AIX50)
7495 return "elf64-ia64-aix-big";
7496 #elif defined(TE_HPUX)
7497 return "elf64-ia64-hpux-big";
7499 return "elf64-ia64-big";
7502 #if defined(TE_AIX50)
7503 return "elf32-ia64-aix-big";
7504 #elif defined(TE_HPUX)
7505 return "elf32-ia64-hpux-big";
7507 return "elf32-ia64-big";
7512 if (md.flags & EF_IA_64_ABI64)
7514 return "elf64-ia64-aix-little";
7516 return "elf64-ia64-little";
7520 return "elf32-ia64-aix-little";
7522 return "elf32-ia64-little";
7527 return "unknown-format";
7531 ia64_end_of_source ()
7533 /* terminate insn group upon reaching end of file: */
7534 insn_group_break (1, 0, 0);
7536 /* emit slots we haven't written yet: */
7537 ia64_flush_insns ();
7539 bfd_set_private_flags (stdoutput, md.flags);
7541 md.mem_offset.hint = 0;
7547 if (md.qp.X_op == O_register)
7548 as_bad ("qualifying predicate not followed by instruction");
7549 md.qp.X_op = O_absent;
7551 if (ignore_input ())
7554 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7556 if (md.detect_dv && !md.explicit_mode)
7563 as_warn (_("Explicit stops are ignored in auto mode"));
7567 insn_group_break (1, 0, 0);
7571 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7573 static int defining_tag = 0;
7576 ia64_unrecognized_line (ch)
7582 expression (&md.qp);
7583 if (*input_line_pointer++ != ')')
7585 as_bad ("Expected ')'");
7588 if (md.qp.X_op != O_register)
7590 as_bad ("Qualifying predicate expected");
7593 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7595 as_bad ("Predicate register expected");
7601 if (md.manual_bundling)
7602 as_warn ("Found '{' when manual bundling is already turned on");
7604 CURR_SLOT.manual_bundling_on = 1;
7605 md.manual_bundling = 1;
7607 /* Bundling is only acceptable in explicit mode
7608 or when in default automatic mode. */
7609 if (md.detect_dv && !md.explicit_mode)
7611 if (!md.mode_explicitly_set
7612 && !md.default_explicit_mode)
7615 as_warn (_("Found '{' after explicit switch to automatic mode"));
7620 if (!md.manual_bundling)
7621 as_warn ("Found '}' when manual bundling is off");
7623 PREV_SLOT.manual_bundling_off = 1;
7624 md.manual_bundling = 0;
7626 /* switch back to automatic mode, if applicable */
7629 && !md.mode_explicitly_set
7630 && !md.default_explicit_mode)
7633 /* Allow '{' to follow on the same line. We also allow ";;", but that
7634 happens automatically because ';' is an end of line marker. */
7636 if (input_line_pointer[0] == '{')
7638 input_line_pointer++;
7639 return ia64_unrecognized_line ('{');
7642 demand_empty_rest_of_line ();
7652 if (md.qp.X_op == O_register)
7654 as_bad ("Tag must come before qualifying predicate.");
7658 /* This implements just enough of read_a_source_file in read.c to
7659 recognize labels. */
7660 if (is_name_beginner (*input_line_pointer))
7662 s = input_line_pointer;
7663 c = get_symbol_end ();
7665 else if (LOCAL_LABELS_FB
7666 && ISDIGIT (*input_line_pointer))
7669 while (ISDIGIT (*input_line_pointer))
7670 temp = (temp * 10) + *input_line_pointer++ - '0';
7671 fb_label_instance_inc (temp);
7672 s = fb_label_name (temp, 0);
7673 c = *input_line_pointer;
7682 /* Put ':' back for error messages' sake. */
7683 *input_line_pointer++ = ':';
7684 as_bad ("Expected ':'");
7691 /* Put ':' back for error messages' sake. */
7692 *input_line_pointer++ = ':';
7693 if (*input_line_pointer++ != ']')
7695 as_bad ("Expected ']'");
7700 as_bad ("Tag name expected");
7710 /* Not a valid line. */
7715 ia64_frob_label (sym)
7718 struct label_fix *fix;
7720 /* Tags need special handling since they are not bundle breaks like
7724 fix = obstack_alloc (¬es, sizeof (*fix));
7726 fix->next = CURR_SLOT.tag_fixups;
7727 CURR_SLOT.tag_fixups = fix;
7732 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7734 md.last_text_seg = now_seg;
7735 fix = obstack_alloc (¬es, sizeof (*fix));
7737 fix->next = CURR_SLOT.label_fixups;
7738 CURR_SLOT.label_fixups = fix;
7740 /* Keep track of how many code entry points we've seen. */
7741 if (md.path == md.maxpaths)
7744 md.entry_labels = (const char **)
7745 xrealloc ((void *) md.entry_labels,
7746 md.maxpaths * sizeof (char *));
7748 md.entry_labels[md.path++] = S_GET_NAME (sym);
7753 /* The HP-UX linker will give unresolved symbol errors for symbols
7754 that are declared but unused. This routine removes declared,
7755 unused symbols from an object. */
7757 ia64_frob_symbol (sym)
7760 if ((S_GET_SEGMENT (sym) == &bfd_und_section && ! symbol_used_p (sym) &&
7761 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7762 || (S_GET_SEGMENT (sym) == &bfd_abs_section
7763 && ! S_IS_EXTERNAL (sym)))
7770 ia64_flush_pending_output ()
7772 if (!md.keep_pending_output
7773 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7775 /* ??? This causes many unnecessary stop bits to be emitted.
7776 Unfortunately, it isn't clear if it is safe to remove this. */
7777 insn_group_break (1, 0, 0);
7778 ia64_flush_insns ();
7782 /* Do ia64-specific expression optimization. All that's done here is
7783 to transform index expressions that are either due to the indexing
7784 of rotating registers or due to the indexing of indirect register
7787 ia64_optimize_expr (l, op, r)
7796 if (l->X_op == O_register && r->X_op == O_constant)
7798 num_regs = (l->X_add_number >> 16);
7799 if ((unsigned) r->X_add_number >= num_regs)
7802 as_bad ("No current frame");
7804 as_bad ("Index out of range 0..%u", num_regs - 1);
7805 r->X_add_number = 0;
7807 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
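/* E.g. "out[2]" folds here to the plain register number: the base of
   the "out" set (kept in the low 16 bits) plus the constant index 2.  */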
7810 else if (l->X_op == O_register && r->X_op == O_register)
7812 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
7813 || l->X_add_number == IND_MEM)
7815 as_bad ("Indirect register set name expected");
7816 l->X_add_number = IND_CPUID;
7819 l->X_op_symbol = md.regsym[l->X_add_number];
7820 l->X_add_number = r->X_add_number;
7828 ia64_parse_name (name, e, nextcharP)
7833 struct const_desc *cdesc;
7834 struct dynreg *dr = 0;
7841 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7843 /* Find what relocation pseudo-function we're dealing with. */
7844 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7845 if (pseudo_func[idx].name
7846 && pseudo_func[idx].name[0] == name[1]
7847 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7849 pseudo_type = pseudo_func[idx].type;
7852 switch (pseudo_type)
7854 case PSEUDO_FUNC_RELOC:
7855 end = input_line_pointer;
7856 if (*nextcharP != '(')
7858 as_bad ("Expected '('");
7862 ++input_line_pointer;
7864 if (*input_line_pointer != ')')
7866 as_bad ("Missing ')'");
7870 ++input_line_pointer;
7871 if (e->X_op != O_symbol)
7873 if (e->X_op != O_pseudo_fixup)
7875 as_bad ("Not a symbolic expression");
7878 if (idx != FUNC_LT_RELATIVE)
7880 as_bad ("Illegal combination of relocation functions");
7883 switch (S_GET_VALUE (e->X_op_symbol))
7885 case FUNC_FPTR_RELATIVE:
7886 idx = FUNC_LT_FPTR_RELATIVE; break;
7887 case FUNC_DTP_MODULE:
7888 idx = FUNC_LT_DTP_MODULE; break;
7889 case FUNC_DTP_RELATIVE:
7890 idx = FUNC_LT_DTP_RELATIVE; break;
7891 case FUNC_TP_RELATIVE:
7892 idx = FUNC_LT_TP_RELATIVE; break;
7894 as_bad ("Illegal combination of relocation functions");
7898 /* Make sure gas doesn't get rid of local symbols that are used
7900 e->X_op = O_pseudo_fixup;
7901 e->X_op_symbol = pseudo_func[idx].u.sym;
7903 *nextcharP = *input_line_pointer;
7906 case PSEUDO_FUNC_CONST:
7907 e->X_op = O_constant;
7908 e->X_add_number = pseudo_func[idx].u.ival;
7911 case PSEUDO_FUNC_REG:
7912 e->X_op = O_register;
7913 e->X_add_number = pseudo_func[idx].u.ival;
7922 /* first see if NAME is a known register name: */
7923 sym = hash_find (md.reg_hash, name);
7926 e->X_op = O_register;
7927 e->X_add_number = S_GET_VALUE (sym);
7931 cdesc = hash_find (md.const_hash, name);
7934 e->X_op = O_constant;
7935 e->X_add_number = cdesc->value;
7939 /* check for inN, locN, or outN: */
7944 if (name[1] == 'n' && ISDIGIT (name[2]))
7952 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7960 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
7971 /* Ignore register numbers with leading zeroes, except zero itself. */
7972 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
7974 unsigned long regnum;
7976 /* The name is inN, locN, or outN; parse the register number. */
7977 regnum = strtoul (name + idx, &end, 10);
7978 if (end > name + idx && *end == '\0' && regnum < 96)
7980 if (regnum >= dr->num_regs)
7983 as_bad ("No current frame");
7985 as_bad ("Register number out of range 0..%u",
7989 e->X_op = O_register;
7990 e->X_add_number = dr->base + regnum;
7995 end = alloca (strlen (name) + 1);
7997 name = ia64_canonicalize_symbol_name (end);
7998 if ((dr = hash_find (md.dynreg_hash, name)))
8000 /* We've got ourselves the name of a rotating register set.
8001 Store the base register number in the low 16 bits of
8002 X_add_number and the size of the register set in the top 16
8004 e->X_op = O_register;
8005 e->X_add_number = dr->base | (dr->num_regs << 16);
8011 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
8014 ia64_canonicalize_symbol_name (name)
8017 size_t len = strlen (name), full = len;
8019 while (len > 0 && name[len - 1] == '#')
8024 as_bad ("Standalone `#' is illegal");
8026 else if (len < full - 1)
8027 as_warn ("Redundant `#' suffix operators");
8032 /* Return true if idesc is a conditional branch instruction. This excludes
8033 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8034 because they always read/write resources regardless of the value of the
8035 qualifying predicate. br.ia must always use p0, and hence is always
8036 taken. Thus this function returns true for branches which can fall
8037 through, and which use no resources if they do fall through. */
8040 is_conditional_branch (idesc)
8041 struct ia64_opcode *idesc;
8043 /* br is a conditional branch. Everything that starts with br. except
8044 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8045 Everything that starts with brl is a conditional branch. */
8046 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8047 && (idesc->name[2] == '\0'
8048 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8049 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8050 || idesc->name[2] == 'l'
8051 /* br.cond, br.call, br.clr */
8052 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8053 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8054 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8057 /* Return whether the given opcode is a taken branch. If there's any doubt,
8061 is_taken_branch (idesc)
8062 struct ia64_opcode *idesc;
8064 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8065 || strncmp (idesc->name, "br.ia", 5) == 0);
8068 /* Return whether the given opcode is an interruption or rfi. If there's any
8069 doubt, returns zero. */
8072 is_interruption_or_rfi (idesc)
8073 struct ia64_opcode *idesc;
8075 if (strcmp (idesc->name, "rfi") == 0)
8080 /* Returns the index of the given dependency in the opcode's list of chks, or
8081 -1 if there is no dependency. */
8084 depends_on (depind, idesc)
8086 struct ia64_opcode *idesc;
8089 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8090 for (i = 0; i < dep->nchks; i++)
8092 if (depind == DEP (dep->chks[i]))
8098 /* Determine a set of specific resources used for a particular resource
8099 class. Returns the number of specific resources identified. For those
8100 cases which are not determinable statically, the resource returned is
8103 Meanings of value in 'NOTE':
8104 1) only read/write when the register number is explicitly encoded in the
8106 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8107 accesses CFM when qualifying predicate is in the rotating region.
8108 3) general register value is used to specify an indirect register; not
8109 determinable statically.
8110 4) only read the given resource when bits 7:0 of the indirect index
8111 register value do not match the register number of the resource; not
8112 determinable statically.
8113 5) all rules are implementation specific.
8114 6) only when both the index specified by the reader and the index specified
8115 by the writer have the same value in bits 63:61; not determinable
8117 7) only access the specified resource when the corresponding mask bit is set.
8119 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8120 only read when these insns reference FR2-31
8121 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8122 written when these insns write FR32-127
8123 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8125 11) The target predicates are written independently of PR[qp], but source
8126 registers are only read if PR[qp] is true. Since the state of PR[qp]
8127 cannot statically be determined, all source registers are marked used.
8128 12) This insn only reads the specified predicate register when that
8129 register is the PR[qp].
8130 13) This reference to ld-c only applies to the GR whose value is loaded
8131 with data returned from memory, not the post-incremented address register.
8132 14) The RSE resource includes the implementation-specific RSE internal
8133 state resources. At least one (and possibly more) of these resources are
8134 read by each instruction listed in IC:rse-readers. At least one (and
8135 possibly more) of these resources are written by each insn listed in
8137 15+16) Represents reserved instructions, which the assembler does not
8140 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8141 this code; there are no dependency violations based on memory access.
8144 #define MAX_SPECS 256
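/* A minimal usage sketch (mirroring the call sites in mark_resources () and
   resources_match () further below; dep, idesc and note come from the
   opcode's dependency tables, and depind is that dependency's index there):

     struct rsrc specs[MAX_SPECS];
     int n = specify_resource (dep, idesc, DV_REG, specs, note, md.path);

     while (n-- > 0)
       mark_resource (idesc, dep, &specs[n], depind, md.path);
*/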
8149 specify_resource (dep, idesc, type, specs, note, path)
8150 const struct ia64_dependency *dep;
8151 struct ia64_opcode *idesc;
8152 int type; /* is this a DV chk or a DV reg? */
8153 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
8154 int note; /* resource note for this insn's usage */
8155 int path; /* which execution path to examine */
8162 if (dep->mode == IA64_DV_WAW
8163 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8164 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8167 /* template for any resources we identify */
8168 tmpl.dependency = dep;
8170 tmpl.insn_srlz = tmpl.data_srlz = 0;
8171 tmpl.qp_regno = CURR_SLOT.qp_regno;
8172 tmpl.link_to_qp_branch = 1;
8173 tmpl.mem_offset.hint = 0;
8176 tmpl.cmp_type = CMP_NONE;
8179 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8180 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8181 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8183 /* we don't need to track these */
8184 if (dep->semantics == IA64_DVS_NONE)
8187 switch (dep->specifier)
8192 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8194 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8195 if (regno >= 0 && regno <= 7)
8197 specs[count] = tmpl;
8198 specs[count++].index = regno;
8204 for (i = 0; i < 8; i++)
8206 specs[count] = tmpl;
8207 specs[count++].index = i;
8216 case IA64_RS_AR_UNAT:
8217 /* This is a mov =AR or mov AR= instruction. */
8218 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8220 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8221 if (regno == AR_UNAT)
8223 specs[count++] = tmpl;
8228 /* This is a spill/fill, or other instruction that modifies the
8231 /* Unless we can determine the specific bits used, mark the whole
8232 thing; bits 8:3 of the memory address indicate the bit used in
8233 UNAT. The .mem.offset hint may be used to eliminate a small
8234 subset of conflicts. */
8235 specs[count] = tmpl;
8236 if (md.mem_offset.hint)
8239 fprintf (stderr, " Using hint for spill/fill\n");
8240 /* The index isn't actually used, just set it to something
8241 approximating the bit index. */
8242 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8243 specs[count].mem_offset.hint = 1;
8244 specs[count].mem_offset.offset = md.mem_offset.offset;
8245 specs[count++].mem_offset.base = md.mem_offset.base;
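/* For example (illustrative): ".mem.offset 0, 0" before one st8.spill and
   ".mem.offset 8, 0" before the next record offsets 0 and 8 here; bits 8:3
   of the two offsets differ, so resources_match () below will not treat the
   two spills as touching the same AR[UNAT] bit.  */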
8249 specs[count++].specific = 0;
8257 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8259 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8260 if ((regno >= 8 && regno <= 15)
8261 || (regno >= 20 && regno <= 23)
8262 || (regno >= 31 && regno <= 39)
8263 || (regno >= 41 && regno <= 47)
8264 || (regno >= 67 && regno <= 111))
8266 specs[count] = tmpl;
8267 specs[count++].index = regno;
8280 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8282 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8283 if ((regno >= 48 && regno <= 63)
8284 || (regno >= 112 && regno <= 127))
8286 specs[count] = tmpl;
8287 specs[count++].index = regno;
8293 for (i = 48; i < 64; i++)
8295 specs[count] = tmpl;
8296 specs[count++].index = i;
8298 for (i = 112; i < 128; i++)
8300 specs[count] = tmpl;
8301 specs[count++].index = i;
8319 for (i = 0; i < idesc->num_outputs; i++)
8320 if (idesc->operands[i] == IA64_OPND_B1
8321 || idesc->operands[i] == IA64_OPND_B2)
8323 specs[count] = tmpl;
8324 specs[count++].index =
8325 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8330 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8331 if (idesc->operands[i] == IA64_OPND_B1
8332 || idesc->operands[i] == IA64_OPND_B2)
8334 specs[count] = tmpl;
8335 specs[count++].index =
8336 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8342 case IA64_RS_CPUID: /* four or more registers */
8345 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8347 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8348 if (regno >= 0 && regno < NELEMS (gr_values)
8351 specs[count] = tmpl;
8352 specs[count++].index = gr_values[regno].value & 0xFF;
8356 specs[count] = tmpl;
8357 specs[count++].specific = 0;
8367 case IA64_RS_DBR: /* four or more registers */
8370 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8372 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8373 if (regno >= 0 && regno < NELEMS (gr_values)
8376 specs[count] = tmpl;
8377 specs[count++].index = gr_values[regno].value & 0xFF;
8381 specs[count] = tmpl;
8382 specs[count++].specific = 0;
8386 else if (note == 0 && !rsrc_write)
8388 specs[count] = tmpl;
8389 specs[count++].specific = 0;
8397 case IA64_RS_IBR: /* four or more registers */
8400 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8402 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8403 if (regno >= 0 && regno < NELEMS (gr_values)
8406 specs[count] = tmpl;
8407 specs[count++].index = gr_values[regno].value & 0xFF;
8411 specs[count] = tmpl;
8412 specs[count++].specific = 0;
8425 /* These are implementation specific. Force all references to
8426 conflict with all other references. */
8427 specs[count] = tmpl;
8428 specs[count++].specific = 0;
8436 case IA64_RS_PKR: /* 16 or more registers */
8437 if (note == 3 || note == 4)
8439 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8441 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8442 if (regno >= 0 && regno < NELEMS (gr_values)
8447 specs[count] = tmpl;
8448 specs[count++].index = gr_values[regno].value & 0xFF;
8451 for (i = 0; i < NELEMS (gr_values); i++)
8453 /* Uses all registers *except* the one in R3. */
8454 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8456 specs[count] = tmpl;
8457 specs[count++].index = i;
8463 specs[count] = tmpl;
8464 specs[count++].specific = 0;
8471 specs[count] = tmpl;
8472 specs[count++].specific = 0;
8476 case IA64_RS_PMC: /* four or more registers */
8479 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8480 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8483 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8485 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
8486 if (regno >= 0 && regno < NELEMS (gr_values)
8489 specs[count] = tmpl;
8490 specs[count++].index = gr_values[regno].value & 0xFF;
8494 specs[count] = tmpl;
8495 specs[count++].specific = 0;
8505 case IA64_RS_PMD: /* four or more registers */
8508 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8510 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8511 if (regno >= 0 && regno < NELEMS (gr_values)
8514 specs[count] = tmpl;
8515 specs[count++].index = gr_values[regno].value & 0xFF;
8519 specs[count] = tmpl;
8520 specs[count++].specific = 0;
8530 case IA64_RS_RR: /* eight registers */
8533 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8535 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8536 if (regno >= 0 && regno < NELEMS (gr_values)
8539 specs[count] = tmpl;
8540 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8544 specs[count] = tmpl;
8545 specs[count++].specific = 0;
8549 else if (note == 0 && !rsrc_write)
8551 specs[count] = tmpl;
8552 specs[count++].specific = 0;
8560 case IA64_RS_CR_IRR:
8563 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8564 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8566 && idesc->operands[1] == IA64_OPND_CR3
8569 for (i = 0; i < 4; i++)
8571 specs[count] = tmpl;
8572 specs[count++].index = CR_IRR0 + i;
8578 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8579 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8581 && regno <= CR_IRR3)
8583 specs[count] = tmpl;
8584 specs[count++].index = regno;
8593 case IA64_RS_CR_LRR:
8600 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8601 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8602 && (regno == CR_LRR0 || regno == CR_LRR1))
8604 specs[count] = tmpl;
8605 specs[count++].index = regno;
8613 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8615 specs[count] = tmpl;
8616 specs[count++].index =
8617 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8632 else if (rsrc_write)
8634 if (dep->specifier == IA64_RS_FRb
8635 && idesc->operands[0] == IA64_OPND_F1)
8637 specs[count] = tmpl;
8638 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8643 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8645 if (idesc->operands[i] == IA64_OPND_F2
8646 || idesc->operands[i] == IA64_OPND_F3
8647 || idesc->operands[i] == IA64_OPND_F4)
8649 specs[count] = tmpl;
8650 specs[count++].index =
8651 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8660 /* This reference applies only to the GR whose value is loaded with
8661 data returned from memory. */
8662 specs[count] = tmpl;
8663 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8669 for (i = 0; i < idesc->num_outputs; i++)
8670 if (idesc->operands[i] == IA64_OPND_R1
8671 || idesc->operands[i] == IA64_OPND_R2
8672 || idesc->operands[i] == IA64_OPND_R3)
8674 specs[count] = tmpl;
8675 specs[count++].index =
8676 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8678 if (idesc->flags & IA64_OPCODE_POSTINC)
8679 for (i = 0; i < NELEMS (idesc->operands); i++)
8680 if (idesc->operands[i] == IA64_OPND_MR3)
8682 specs[count] = tmpl;
8683 specs[count++].index =
8684 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8689 /* Look for anything that reads a GR. */
8690 for (i = 0; i < NELEMS (idesc->operands); i++)
8692 if (idesc->operands[i] == IA64_OPND_MR3
8693 || idesc->operands[i] == IA64_OPND_CPUID_R3
8694 || idesc->operands[i] == IA64_OPND_DBR_R3
8695 || idesc->operands[i] == IA64_OPND_IBR_R3
8696 || idesc->operands[i] == IA64_OPND_MSR_R3
8697 || idesc->operands[i] == IA64_OPND_PKR_R3
8698 || idesc->operands[i] == IA64_OPND_PMC_R3
8699 || idesc->operands[i] == IA64_OPND_PMD_R3
8700 || idesc->operands[i] == IA64_OPND_RR_R3
8701 || ((i >= idesc->num_outputs)
8702 && (idesc->operands[i] == IA64_OPND_R1
8703 || idesc->operands[i] == IA64_OPND_R2
8704 || idesc->operands[i] == IA64_OPND_R3
8705 /* addl source register. */
8706 || idesc->operands[i] == IA64_OPND_R3_2)))
8708 specs[count] = tmpl;
8709 specs[count++].index =
8710 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8721 /* This is the same as IA64_RS_PRr, except that the register range is
8722 from 1 - 15, and there are no rotating register reads/writes here. */
8726 for (i = 1; i < 16; i++)
8728 specs[count] = tmpl;
8729 specs[count++].index = i;
8735 /* Mark only those registers indicated by the mask. */
8738 mask = CURR_SLOT.opnd[2].X_add_number;
8739 for (i = 1; i < 16; i++)
8740 if (mask & ((valueT) 1 << i))
8742 specs[count] = tmpl;
8743 specs[count++].index = i;
8751 else if (note == 11) /* note 11 implies note 1 as well */
8755 for (i = 0; i < idesc->num_outputs; i++)
8757 if (idesc->operands[i] == IA64_OPND_P1
8758 || idesc->operands[i] == IA64_OPND_P2)
8760 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8761 if (regno >= 1 && regno < 16)
8763 specs[count] = tmpl;
8764 specs[count++].index = regno;
8774 else if (note == 12)
8776 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8778 specs[count] = tmpl;
8779 specs[count++].index = CURR_SLOT.qp_regno;
8786 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8787 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8788 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8789 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8791 if ((idesc->operands[0] == IA64_OPND_P1
8792 || idesc->operands[0] == IA64_OPND_P2)
8793 && p1 >= 1 && p1 < 16)
8795 specs[count] = tmpl;
8796 specs[count].cmp_type =
8797 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8798 specs[count++].index = p1;
8800 if ((idesc->operands[1] == IA64_OPND_P1
8801 || idesc->operands[1] == IA64_OPND_P2)
8802 && p2 >= 1 && p2 < 16)
8804 specs[count] = tmpl;
8805 specs[count].cmp_type =
8806 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8807 specs[count++].index = p2;
8812 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8814 specs[count] = tmpl;
8815 specs[count++].index = CURR_SLOT.qp_regno;
8817 if (idesc->operands[1] == IA64_OPND_PR)
8819 for (i = 1; i < 16; i++)
8821 specs[count] = tmpl;
8822 specs[count++].index = i;
8833 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8834 simplified cases of this. */
8838 for (i = 16; i < 63; i++)
8840 specs[count] = tmpl;
8841 specs[count++].index = i;
8847 /* Mark only those registers indicated by the mask. */
8849 && idesc->operands[0] == IA64_OPND_PR)
8851 mask = CURR_SLOT.opnd[2].X_add_number;
8852 if (mask & ((valueT) 1 << 16))
8853 for (i = 16; i < 63; i++)
8855 specs[count] = tmpl;
8856 specs[count++].index = i;
8860 && idesc->operands[0] == IA64_OPND_PR_ROT)
8862 for (i = 16; i < 63; i++)
8864 specs[count] = tmpl;
8865 specs[count++].index = i;
8873 else if (note == 11) /* note 11 implies note 1 as well */
8877 for (i = 0; i < idesc->num_outputs; i++)
8879 if (idesc->operands[i] == IA64_OPND_P1
8880 || idesc->operands[i] == IA64_OPND_P2)
8882 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8883 if (regno >= 16 && regno < 63)
8885 specs[count] = tmpl;
8886 specs[count++].index = regno;
8896 else if (note == 12)
8898 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8900 specs[count] = tmpl;
8901 specs[count++].index = CURR_SLOT.qp_regno;
8908 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8909 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8910 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8911 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8913 if ((idesc->operands[0] == IA64_OPND_P1
8914 || idesc->operands[0] == IA64_OPND_P2)
8915 && p1 >= 16 && p1 < 63)
8917 specs[count] = tmpl;
8918 specs[count].cmp_type =
8919 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8920 specs[count++].index = p1;
8922 if ((idesc->operands[1] == IA64_OPND_P1
8923 || idesc->operands[1] == IA64_OPND_P2)
8924 && p2 >= 16 && p2 < 63)
8926 specs[count] = tmpl;
8927 specs[count].cmp_type =
8928 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8929 specs[count++].index = p2;
8934 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8936 specs[count] = tmpl;
8937 specs[count++].index = CURR_SLOT.qp_regno;
8939 if (idesc->operands[1] == IA64_OPND_PR)
8941 for (i = 16; i < 63; i++)
8943 specs[count] = tmpl;
8944 specs[count++].index = i;
8956 /* Verify that the instruction is using the PSR bit indicated in
8960 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
8962 if (dep->regindex < 6)
8964 specs[count++] = tmpl;
8967 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
8969 if (dep->regindex < 32
8970 || dep->regindex == 35
8971 || dep->regindex == 36
8972 || (!rsrc_write && dep->regindex == PSR_CPL))
8974 specs[count++] = tmpl;
8977 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
8979 if (dep->regindex < 32
8980 || dep->regindex == 35
8981 || dep->regindex == 36
8982 || (rsrc_write && dep->regindex == PSR_CPL))
8984 specs[count++] = tmpl;
8989 /* Several PSR bits have very specific dependencies. */
8990 switch (dep->regindex)
8993 specs[count++] = tmpl;
8998 specs[count++] = tmpl;
9002 /* Only certain CR accesses use PSR.ic */
9003 if (idesc->operands[0] == IA64_OPND_CR3
9004 || idesc->operands[1] == IA64_OPND_CR3)
9007 ((idesc->operands[0] == IA64_OPND_CR3)
9010 CURR_SLOT.opnd[index].X_add_number - REG_CR;
9025 specs[count++] = tmpl;
9034 specs[count++] = tmpl;
9038 /* Only some AR accesses use cpl */
9039 if (idesc->operands[0] == IA64_OPND_AR3
9040 || idesc->operands[1] == IA64_OPND_AR3)
9043 ((idesc->operands[0] == IA64_OPND_AR3)
9046 CURR_SLOT.opnd[index].X_add_number - REG_AR;
9053 && regno <= AR_K7))))
9055 specs[count++] = tmpl;
9060 specs[count++] = tmpl;
9070 if (idesc->operands[0] == IA64_OPND_IMMU24)
9072 mask = CURR_SLOT.opnd[0].X_add_number;
9078 if (mask & ((valueT) 1 << dep->regindex))
9080 specs[count++] = tmpl;
9085 int min = dep->regindex == PSR_DFL ? 2 : 32;
9086 int max = dep->regindex == PSR_DFL ? 31 : 127;
9087 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9088 for (i = 0; i < NELEMS (idesc->operands); i++)
9090 if (idesc->operands[i] == IA64_OPND_F1
9091 || idesc->operands[i] == IA64_OPND_F2
9092 || idesc->operands[i] == IA64_OPND_F3
9093 || idesc->operands[i] == IA64_OPND_F4)
9095 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9096 if (reg >= min && reg <= max)
9098 specs[count++] = tmpl;
9105 int min = dep->regindex == PSR_MFL ? 2 : 32;
9106 int max = dep->regindex == PSR_MFL ? 31 : 127;
9107 /* mfh is read on writes to FR32-127; mfl is read on writes to
9109 for (i = 0; i < idesc->num_outputs; i++)
9111 if (idesc->operands[i] == IA64_OPND_F1)
9113 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9114 if (reg >= min && reg <= max)
9116 specs[count++] = tmpl;
9121 else if (note == 10)
9123 for (i = 0; i < NELEMS (idesc->operands); i++)
9125 if (idesc->operands[i] == IA64_OPND_R1
9126 || idesc->operands[i] == IA64_OPND_R2
9127 || idesc->operands[i] == IA64_OPND_R3)
9129 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9130 if (regno >= 16 && regno <= 31)
9132 specs[count++] = tmpl;
9143 case IA64_RS_AR_FPSR:
9144 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9146 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9147 if (regno == AR_FPSR)
9149 specs[count++] = tmpl;
9154 specs[count++] = tmpl;
9159 /* Handle all AR[REG] resources */
9160 if (note == 0 || note == 1)
9162 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9163 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9164 && regno == dep->regindex)
9166 specs[count++] = tmpl;
9168 /* other AR[REG] resources may be affected by AR accesses */
9169 else if (idesc->operands[0] == IA64_OPND_AR3)
9172 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9173 switch (dep->regindex)
9179 if (regno == AR_BSPSTORE)
9181 specs[count++] = tmpl;
9185 (regno == AR_BSPSTORE
9186 || regno == AR_RNAT))
9188 specs[count++] = tmpl;
9193 else if (idesc->operands[1] == IA64_OPND_AR3)
9196 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9197 switch (dep->regindex)
9202 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9204 specs[count++] = tmpl;
9211 specs[count++] = tmpl;
9221 /* Handle all CR[REG] resources */
9222 if (note == 0 || note == 1)
9224 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9226 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9227 if (regno == dep->regindex)
9229 specs[count++] = tmpl;
9231 else if (!rsrc_write)
9233 /* Reads from CR[IVR] affect other resources. */
9234 if (regno == CR_IVR)
9236 if ((dep->regindex >= CR_IRR0
9237 && dep->regindex <= CR_IRR3)
9238 || dep->regindex == CR_TPR)
9240 specs[count++] = tmpl;
9247 specs[count++] = tmpl;
9256 case IA64_RS_INSERVICE:
9257 /* look for write of EOI (67) or read of IVR (65) */
9258 if ((idesc->operands[0] == IA64_OPND_CR3
9259 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9260 || (idesc->operands[1] == IA64_OPND_CR3
9261 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9263 specs[count++] = tmpl;
9270 specs[count++] = tmpl;
9281 specs[count++] = tmpl;
9285 /* Check if any of the registers accessed are in the rotating region.
9286 mov to/from pr accesses CFM only when qp_regno is in the rotating region.
9288 for (i = 0; i < NELEMS (idesc->operands); i++)
9290 if (idesc->operands[i] == IA64_OPND_R1
9291 || idesc->operands[i] == IA64_OPND_R2
9292 || idesc->operands[i] == IA64_OPND_R3)
9294 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9295 /* Assumes that md.rot.num_regs is always valid */
9296 if (md.rot.num_regs > 0
9298 && num < 31 + md.rot.num_regs)
9300 specs[count] = tmpl;
9301 specs[count++].specific = 0;
9304 else if (idesc->operands[i] == IA64_OPND_F1
9305 || idesc->operands[i] == IA64_OPND_F2
9306 || idesc->operands[i] == IA64_OPND_F3
9307 || idesc->operands[i] == IA64_OPND_F4)
9309 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9312 specs[count] = tmpl;
9313 specs[count++].specific = 0;
9316 else if (idesc->operands[i] == IA64_OPND_P1
9317 || idesc->operands[i] == IA64_OPND_P2)
9319 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9322 specs[count] = tmpl;
9323 specs[count++].specific = 0;
9327 if (CURR_SLOT.qp_regno > 15)
9329 specs[count] = tmpl;
9330 specs[count++].specific = 0;
9335 /* This is the same as IA64_RS_PRr, except simplified to account for
9336 the fact that there is only one register. */
9340 specs[count++] = tmpl;
9345 if (idesc->operands[2] == IA64_OPND_IMM17)
9346 mask = CURR_SLOT.opnd[2].X_add_number;
9347 if (mask & ((valueT) 1 << 63))
9348 specs[count++] = tmpl;
9350 else if (note == 11)
9352 if ((idesc->operands[0] == IA64_OPND_P1
9353 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9354 || (idesc->operands[1] == IA64_OPND_P2
9355 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9357 specs[count++] = tmpl;
9360 else if (note == 12)
9362 if (CURR_SLOT.qp_regno == 63)
9364 specs[count++] = tmpl;
9371 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9372 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9373 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9374 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9377 && (idesc->operands[0] == IA64_OPND_P1
9378 || idesc->operands[0] == IA64_OPND_P2))
9380 specs[count] = tmpl;
9381 specs[count++].cmp_type =
9382 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9385 && (idesc->operands[1] == IA64_OPND_P1
9386 || idesc->operands[1] == IA64_OPND_P2))
9388 specs[count] = tmpl;
9389 specs[count++].cmp_type =
9390 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9395 if (CURR_SLOT.qp_regno == 63)
9397 specs[count++] = tmpl;
9408 /* FIXME we can identify some individual RSE written resources, but RSE
9409 read resources have not yet been completely identified, so for now
9410 treat RSE as a single resource */
9411 if (strncmp (idesc->name, "mov", 3) == 0)
9415 if (idesc->operands[0] == IA64_OPND_AR3
9416 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9418 specs[count++] = tmpl;
9423 if (idesc->operands[0] == IA64_OPND_AR3)
9425 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9426 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9428 specs[count++] = tmpl;
9431 else if (idesc->operands[1] == IA64_OPND_AR3)
9433 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9434 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9435 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9437 specs[count++] = tmpl;
9444 specs[count++] = tmpl;
9449 /* FIXME -- do any of these need to be non-specific? */
9450 specs[count++] = tmpl;
9454 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9461 /* Clear branch flags on marked resources. This breaks the link between the
9462 QP of the marking instruction and a subsequent branch on the same QP. */
9465 clear_qp_branch_flag (mask)
9469 for (i = 0; i < regdepslen; i++)
9471 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9472 if ((bit & mask) != 0)
9474 regdeps[i].link_to_qp_branch = 0;
9479 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9480 any mutexes which contain one of the PRs and create new ones when needed.
9484 update_qp_mutex (valueT mask)
9490 while (i < qp_mutexeslen)
9492 if ((qp_mutexes[i].prmask & mask) != 0)
9494 /* If it destroys and creates the same mutex, do nothing. */
9495 if (qp_mutexes[i].prmask == mask
9496 && qp_mutexes[i].path == md.path)
9507 fprintf (stderr, " Clearing mutex relation");
9508 print_prmask (qp_mutexes[i].prmask);
9509 fprintf (stderr, "\n");
9512 /* Deal with the old mutex (one with three or more PRs) only if
9513 the new mutex is on the same execution path as it.
9515 FIXME: The 3+ mutex support is incomplete.
9516 dot_pred_rel () may be a better place to fix it. */
9517 if (qp_mutexes[i].path == md.path)
9519 /* If it is a proper subset of the mutex, create a
9522 && (qp_mutexes[i].prmask & mask) == mask)
9525 qp_mutexes[i].prmask &= ~mask;
9526 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9528 /* Modify the mutex if there are more than one
9536 /* Remove the mutex. */
9537 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9545 add_qp_mutex (mask);
9550 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9552 Any change to a PR clears the mutex relations which include that PR.
9555 clear_qp_mutex (mask)
9561 while (i < qp_mutexeslen)
9563 if ((qp_mutexes[i].prmask & mask) != 0)
9567 fprintf (stderr, " Clearing mutex relation");
9568 print_prmask (qp_mutexes[i].prmask);
9569 fprintf (stderr, "\n");
9571 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9578 /* Clear implies relations which contain PRs in the given masks.
9579 P1_MASK indicates the source of the implies relation, while P2_MASK
9580 indicates the implied PR. */
9583 clear_qp_implies (p1_mask, p2_mask)
9590 while (i < qp_implieslen)
9592 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9593 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9596 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9597 qp_implies[i].p1, qp_implies[i].p2);
9598 qp_implies[i] = qp_implies[--qp_implieslen];
9605 /* Add the PRs specified to the list of implied relations. */
9608 add_qp_imply (p1, p2)
9615 /* p0 is not meaningful here. */
9616 if (p1 == 0 || p2 == 0)
9622 /* If it exists already, ignore it. */
9623 for (i = 0; i < qp_implieslen; i++)
9625 if (qp_implies[i].p1 == p1
9626 && qp_implies[i].p2 == p2
9627 && qp_implies[i].path == md.path
9628 && !qp_implies[i].p2_branched)
9632 if (qp_implieslen == qp_impliestotlen)
9634 qp_impliestotlen += 20;
9635 qp_implies = (struct qp_imply *)
9636 xrealloc ((void *) qp_implies,
9637 qp_impliestotlen * sizeof (struct qp_imply));
9640 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9641 qp_implies[qp_implieslen].p1 = p1;
9642 qp_implies[qp_implieslen].p2 = p2;
9643 qp_implies[qp_implieslen].path = md.path;
9644 qp_implies[qp_implieslen++].p2_branched = 0;
9646 /* Add in the implied transitive relations; for everything that p2 implies,
9647 make p1 imply that, too; for everything that implies p1, make it imply p2
9649 for (i = 0; i < qp_implieslen; i++)
9651 if (qp_implies[i].p1 == p2)
9652 add_qp_imply (p1, qp_implies[i].p2);
9653 if (qp_implies[i].p2 == p1)
9654 add_qp_imply (qp_implies[i].p1, p2);
9656 /* Add in mutex relations implied by this implies relation; for each mutex
9657 relation containing p2, duplicate it and replace p2 with p1. */
9658 bit = (valueT) 1 << p1;
9659 mask = (valueT) 1 << p2;
9660 for (i = 0; i < qp_mutexeslen; i++)
9662 if (qp_mutexes[i].prmask & mask)
9663 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
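/* For example (illustrative): if p1 implies p2 and {p2, p6} are already
   known to be mutex, the loop above also records {p1, p6} as mutex, since
   p1 being true would force p2 true and therefore p6 false.  */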
9667 /* Add the PRs specified in the mask to the mutex list; this means that only
9668 one of the PRs can be true at any time. PR0 should never be included in the mask.
9678 if (qp_mutexeslen == qp_mutexestotlen)
9680 qp_mutexestotlen += 20;
9681 qp_mutexes = (struct qpmutex *)
9682 xrealloc ((void *) qp_mutexes,
9683 qp_mutexestotlen * sizeof (struct qpmutex));
9687 fprintf (stderr, " Registering mutex on");
9688 print_prmask (mask);
9689 fprintf (stderr, "\n");
9691 qp_mutexes[qp_mutexeslen].path = md.path;
9692 qp_mutexes[qp_mutexeslen++].prmask = mask;
9696 has_suffix_p (name, suffix)
9700 size_t namelen = strlen (name);
9701 size_t sufflen = strlen (suffix);
9703 if (namelen <= sufflen)
9705 return strcmp (name + namelen - sufflen, suffix) == 0;
9709 clear_register_values ()
9713 fprintf (stderr, " Clearing register values\n");
9714 for (i = 1; i < NELEMS (gr_values); i++)
9715 gr_values[i].known = 0;
9718 /* Keep track of register values/changes which affect DV tracking.
9720 optimization note: should add a flag to classes of insns where otherwise we
9721 have to examine a group of strings to identify them. */
9724 note_register_values (idesc)
9725 struct ia64_opcode *idesc;
9727 valueT qp_changemask = 0;
9730 /* Invalidate values for registers being written to. */
9731 for (i = 0; i < idesc->num_outputs; i++)
9733 if (idesc->operands[i] == IA64_OPND_R1
9734 || idesc->operands[i] == IA64_OPND_R2
9735 || idesc->operands[i] == IA64_OPND_R3)
9737 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9738 if (regno > 0 && regno < NELEMS (gr_values))
9739 gr_values[regno].known = 0;
9741 else if (idesc->operands[i] == IA64_OPND_R3_2)
9743 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9744 if (regno > 0 && regno < 4)
9745 gr_values[regno].known = 0;
9747 else if (idesc->operands[i] == IA64_OPND_P1
9748 || idesc->operands[i] == IA64_OPND_P2)
9750 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9751 qp_changemask |= (valueT) 1 << regno;
9753 else if (idesc->operands[i] == IA64_OPND_PR)
9755 if (idesc->operands[2] & (valueT) 0x10000)
9756 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9758 qp_changemask = idesc->operands[2];
9761 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9763 if (idesc->operands[1] & ((valueT) 1 << 43))
9764 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9766 qp_changemask = idesc->operands[1];
9767 qp_changemask &= ~(valueT) 0xFFFF;
9772 /* Always clear qp branch flags on any PR change. */
9773 /* FIXME there may be exceptions for certain compares. */
9774 clear_qp_branch_flag (qp_changemask);
9776 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9777 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9779 qp_changemask |= ~(valueT) 0xFFFF;
9780 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9782 for (i = 32; i < 32 + md.rot.num_regs; i++)
9783 gr_values[i].known = 0;
9785 clear_qp_mutex (qp_changemask);
9786 clear_qp_implies (qp_changemask, qp_changemask);
9788 /* After a call, all register values are undefined, except those marked as "safe across calls".
9790 else if (strncmp (idesc->name, "br.call", 6) == 0
9791 || strncmp (idesc->name, "brl.call", 7) == 0)
9793 /* FIXME keep GR values which are marked as "safe_across_calls" */
9794 clear_register_values ();
9795 clear_qp_mutex (~qp_safe_across_calls);
9796 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9797 clear_qp_branch_flag (~qp_safe_across_calls);
9799 else if (is_interruption_or_rfi (idesc)
9800 || is_taken_branch (idesc))
9802 clear_register_values ();
9803 clear_qp_mutex (~(valueT) 0);
9804 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9806 /* Look for mutex and implies relations. */
9807 else if ((idesc->operands[0] == IA64_OPND_P1
9808 || idesc->operands[0] == IA64_OPND_P2)
9809 && (idesc->operands[1] == IA64_OPND_P1
9810 || idesc->operands[1] == IA64_OPND_P2))
9812 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9813 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9814 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9815 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9817 /* If both PRs are PR0, we can't really do anything. */
9818 if (p1 == 0 && p2 == 0)
9821 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9823 /* In general, clear mutexes and implies which include P1 or P2,
9824 with the following exceptions. */
9825 else if (has_suffix_p (idesc->name, ".or.andcm")
9826 || has_suffix_p (idesc->name, ".and.orcm"))
9828 clear_qp_implies (p2mask, p1mask);
9830 else if (has_suffix_p (idesc->name, ".andcm")
9831 || has_suffix_p (idesc->name, ".and"))
9833 clear_qp_implies (0, p1mask | p2mask);
9835 else if (has_suffix_p (idesc->name, ".orcm")
9836 || has_suffix_p (idesc->name, ".or"))
9838 clear_qp_mutex (p1mask | p2mask);
9839 clear_qp_implies (p1mask | p2mask, 0);
9845 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9847 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9848 if (p1 == 0 || p2 == 0)
9849 clear_qp_mutex (p1mask | p2mask);
9851 added = update_qp_mutex (p1mask | p2mask);
9853 if (CURR_SLOT.qp_regno == 0
9854 || has_suffix_p (idesc->name, ".unc"))
9856 if (added == 0 && p1 && p2)
9857 add_qp_mutex (p1mask | p2mask);
9858 if (CURR_SLOT.qp_regno != 0)
9861 add_qp_imply (p1, CURR_SLOT.qp_regno);
9863 add_qp_imply (p2, CURR_SLOT.qp_regno);
9868 /* Look for mov imm insns into GRs. */
9869 else if (idesc->operands[0] == IA64_OPND_R1
9870 && (idesc->operands[1] == IA64_OPND_IMM22
9871 || idesc->operands[1] == IA64_OPND_IMMU64)
9872 && CURR_SLOT.opnd[1].X_op == O_constant
9873 && (strcmp (idesc->name, "mov") == 0
9874 || strcmp (idesc->name, "movl") == 0))
9876 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9877 if (regno > 0 && regno < NELEMS (gr_values))
9879 gr_values[regno].known = 1;
9880 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9881 gr_values[regno].path = md.path;
9884 fprintf (stderr, " Know gr%d = ", regno);
9885 fprintf_vma (stderr, gr_values[regno].value);
9886 fputs ("\n", stderr);
9890 /* Look for dep.z imm insns. */
9891 else if (idesc->operands[0] == IA64_OPND_R1
9892 && idesc->operands[1] == IA64_OPND_IMM8
9893 && strcmp (idesc->name, "dep.z") == 0)
9895 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9896 if (regno > 0 && regno < NELEMS (gr_values))
9898 valueT value = CURR_SLOT.opnd[1].X_add_number;
9900 if (CURR_SLOT.opnd[3].X_add_number < 64)
9901 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9902 value <<= CURR_SLOT.opnd[2].X_add_number;
9903 gr_values[regno].known = 1;
9904 gr_values[regno].value = value;
9905 gr_values[regno].path = md.path;
9908 fprintf (stderr, " Know gr%d = ", regno);
9909 fprintf_vma (stderr, gr_values[regno].value);
9910 fputs ("\n", stderr);
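	  /* Worked example (illustrative): for "dep.z r15 = 3, 4, 8" the
	     tracked value is (3 & 0xff) << 4 == 0x30, so a later indirect
	     reference such as "mov dbr[r15] = r2" can be narrowed to the
	     specific resource DBR 48 (0x30) instead of being nonspecific.  */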
9916 clear_qp_mutex (qp_changemask);
9917 clear_qp_implies (qp_changemask, qp_changemask);
9921 /* Return whether the given predicate registers are currently mutex. */
9924 qp_mutex (p1, p2, path)
9934 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
9935 for (i = 0; i < qp_mutexeslen; i++)
9937 if (qp_mutexes[i].path >= path
9938 && (qp_mutexes[i].prmask & mask) == mask)
9945 /* Return whether the given resource is in the given insn's list of chks.
9946 Return 1 if the conflict is absolutely determined, 2 if it's a potential
9950 resources_match (rs, idesc, note, qp_regno, path)
9952 struct ia64_opcode *idesc;
9957 struct rsrc specs[MAX_SPECS];
9960 /* If the marked resource's qp_regno and the given qp_regno are mutex,
9961 we don't need to check. One exception is note 11, which indicates that
9962 target predicates are written regardless of PR[qp]. */
9963 if (qp_mutex (rs->qp_regno, qp_regno, path)
9967 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
9970 /* UNAT checking is a bit more specific than other resources */
9971 if (rs->dependency->specifier == IA64_RS_AR_UNAT
9972 && specs[count].mem_offset.hint
9973 && rs->mem_offset.hint)
9975 if (rs->mem_offset.base == specs[count].mem_offset.base)
9977 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
9978 ((specs[count].mem_offset.offset >> 3) & 0x3F))
9985 /* Skip apparent PR write conflicts where both writes are an AND or both
9986 writes are an OR. */
9987 if (rs->dependency->specifier == IA64_RS_PR
9988 || rs->dependency->specifier == IA64_RS_PRr
9989 || rs->dependency->specifier == IA64_RS_PR63)
9991 if (specs[count].cmp_type != CMP_NONE
9992 && specs[count].cmp_type == rs->cmp_type)
9995 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
9996 dv_mode[rs->dependency->mode],
9997 rs->dependency->specifier != IA64_RS_PR63 ?
9998 specs[count].index : 63);
10003 " %s on parallel compare conflict %s vs %s on PR%d\n",
10004 dv_mode[rs->dependency->mode],
10005 dv_cmp_type[rs->cmp_type],
10006 dv_cmp_type[specs[count].cmp_type],
10007 rs->dependency->specifier != IA64_RS_PR63 ?
10008 specs[count].index : 63);
10012 /* If either resource is not specific, conservatively assume a conflict
10014 if (!specs[count].specific || !rs->specific)
10016 else if (specs[count].index == rs->index)
10023 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10024 insert a stop to create the break. Update all resource dependencies
10025 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10026 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10027 If SAVE_CURRENT is non-zero, don't affect resources marked by the current instruction.
10031 insn_group_break (insert_stop, qp_regno, save_current)
10038 if (insert_stop && md.num_slots_in_use > 0)
10039 PREV_SLOT.end_of_insn_group = 1;
10043 fprintf (stderr, " Insn group break%s",
10044 (insert_stop ? " (w/stop)" : ""));
10046 fprintf (stderr, " effective for QP=%d", qp_regno);
10047 fprintf (stderr, "\n");
10051 while (i < regdepslen)
10053 const struct ia64_dependency *dep = regdeps[i].dependency;
10056 && regdeps[i].qp_regno != qp_regno)
10063 && CURR_SLOT.src_file == regdeps[i].file
10064 && CURR_SLOT.src_line == regdeps[i].line)
10070 /* clear dependencies which are automatically cleared by a stop, or
10071 those that have reached the appropriate state of insn serialization */
10072 if (dep->semantics == IA64_DVS_IMPLIED
10073 || dep->semantics == IA64_DVS_IMPLIEDF
10074 || regdeps[i].insn_srlz == STATE_SRLZ)
10076 print_dependency ("Removing", i);
10077 regdeps[i] = regdeps[--regdepslen];
10081 if (dep->semantics == IA64_DVS_DATA
10082 || dep->semantics == IA64_DVS_INSTR
10083 || dep->semantics == IA64_DVS_SPECIFIC)
10085 if (regdeps[i].insn_srlz == STATE_NONE)
10086 regdeps[i].insn_srlz = STATE_STOP;
10087 if (regdeps[i].data_srlz == STATE_NONE)
10088 regdeps[i].data_srlz = STATE_STOP;
10095 /* Add the given resource usage spec to the list of active dependencies. */
10098 mark_resource (idesc, dep, spec, depind, path)
10099 struct ia64_opcode *idesc ATTRIBUTE_UNUSED;
10100 const struct ia64_dependency *dep ATTRIBUTE_UNUSED;
10105 if (regdepslen == regdepstotlen)
10107 regdepstotlen += 20;
10108 regdeps = (struct rsrc *)
10109 xrealloc ((void *) regdeps,
10110 regdepstotlen * sizeof (struct rsrc));
10113 regdeps[regdepslen] = *spec;
10114 regdeps[regdepslen].depind = depind;
10115 regdeps[regdepslen].path = path;
10116 regdeps[regdepslen].file = CURR_SLOT.src_file;
10117 regdeps[regdepslen].line = CURR_SLOT.src_line;
10119 print_dependency ("Adding", regdepslen);
10125 print_dependency (action, depind)
10126 const char *action;
10131 fprintf (stderr, " %s %s '%s'",
10132 action, dv_mode[(regdeps[depind].dependency)->mode],
10133 (regdeps[depind].dependency)->name);
10134 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10135 fprintf (stderr, " (%d)", regdeps[depind].index);
10136 if (regdeps[depind].mem_offset.hint)
10138 fputs (" ", stderr);
10139 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10140 fputs ("+", stderr);
10141 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10143 fprintf (stderr, "\n");
10148 instruction_serialization ()
10152 fprintf (stderr, " Instruction serialization\n");
10153 for (i = 0; i < regdepslen; i++)
10154 if (regdeps[i].insn_srlz == STATE_STOP)
10155 regdeps[i].insn_srlz = STATE_SRLZ;
10159 data_serialization ()
10163 fprintf (stderr, " Data serialization\n");
10164 while (i < regdepslen)
10166 if (regdeps[i].data_srlz == STATE_STOP
10167 /* Note: as of 991210, all "other" dependencies are cleared by a
10168 data serialization. This might change with new tables */
10169 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10171 print_dependency ("Removing", i);
10172 regdeps[i] = regdeps[--regdepslen];
10179 /* Insert stops and serializations as needed to avoid DVs. */
10182 remove_marked_resource (rs)
10185 switch (rs->dependency->semantics)
10187 case IA64_DVS_SPECIFIC:
10189 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10190 /* ...fall through... */
10191 case IA64_DVS_INSTR:
10193 fprintf (stderr, "Inserting instr serialization\n");
10194 if (rs->insn_srlz < STATE_STOP)
10195 insn_group_break (1, 0, 0);
10196 if (rs->insn_srlz < STATE_SRLZ)
10198 struct slot oldslot = CURR_SLOT;
10199 /* Manually jam a srlz.i insn into the stream */
10200 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10201 CURR_SLOT.user_template = -1;
10202 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10203 instruction_serialization ();
10204 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10205 if (++md.num_slots_in_use >= NUM_SLOTS)
10206 emit_one_bundle ();
10207 CURR_SLOT = oldslot;
10209 insn_group_break (1, 0, 0);
10211 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10212 "other" types of DV are eliminated
10213 by a data serialization */
10214 case IA64_DVS_DATA:
10216 fprintf (stderr, "Inserting data serialization\n");
10217 if (rs->data_srlz < STATE_STOP)
10218 insn_group_break (1, 0, 0);
10220 struct slot oldslot = CURR_SLOT;
10221 /* Manually jam a srlz.d insn into the stream */
10222 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10223 CURR_SLOT.user_template = -1;
10224 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10225 data_serialization ();
10226 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10227 if (++md.num_slots_in_use >= NUM_SLOTS)
10228 emit_one_bundle ();
10229 CURR_SLOT = oldslot;
10232 case IA64_DVS_IMPLIED:
10233 case IA64_DVS_IMPLIEDF:
10235 fprintf (stderr, "Inserting stop\n");
10236 insn_group_break (1, 0, 0);
10243 /* Check the resources used by the given opcode against the current dependency list.
10246 The check is run once for each execution path encountered. In this case,
10247 a unique execution path is the sequence of instructions following a code
10248 entry point, e.g. the following has three execution paths, one starting
10249 at L0, one at L1, and one at L2.
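   For instance (an illustrative sketch; the instructions are placeholders):

   L0:	mov r4 = 0		// execution path starting at L0
   L1:	ld8 r5 = [r6]		// execution path starting at L1
   L2:	add r7 = r4, r5		// execution path starting at L2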
10258 check_dependencies (idesc)
10259 struct ia64_opcode *idesc;
10261 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10265 /* Note that the number of marked resources may change within the
10266 loop if in auto mode. */
10268 while (i < regdepslen)
10270 struct rsrc *rs = ®deps[i];
10271 const struct ia64_dependency *dep = rs->dependency;
10274 int start_over = 0;
10276 if (dep->semantics == IA64_DVS_NONE
10277 || (chkind = depends_on (rs->depind, idesc)) == -1)
10283 note = NOTE (opdeps->chks[chkind]);
10285 /* Check this resource against each execution path seen thus far. */
10286 for (path = 0; path <= md.path; path++)
10290 /* If the dependency wasn't on the path being checked, ignore it. */
10291 if (rs->path < path)
10294 /* If the QP for this insn implies a QP which has branched, don't
10295 bother checking. Ed. NOTE: I don't think this check is terribly
10296 useful; what's the point of generating code which will only be
10297 reached if its QP is zero?
10298 This code was specifically inserted to handle the following code,
10299 based on notes from Intel's DV checking code, where p1 implies p2.
10305 if (CURR_SLOT.qp_regno != 0)
10309 for (implies = 0; implies < qp_implieslen; implies++)
10311 if (qp_implies[implies].path >= path
10312 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10313 && qp_implies[implies].p2_branched)
10323 if ((matchtype = resources_match (rs, idesc, note,
10324 CURR_SLOT.qp_regno, path)) != 0)
10327 char pathmsg[256] = "";
10328 char indexmsg[256] = "";
10329 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10332 sprintf (pathmsg, " when entry is at label '%s'",
10333 md.entry_labels[path - 1]);
10334 if (matchtype == 1 && rs->index >= 0)
10335 sprintf (indexmsg, ", specific resource number is %d",
10337 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10339 (certain ? "violates" : "may violate"),
10340 dv_mode[dep->mode], dep->name,
10341 dv_sem[dep->semantics],
10342 pathmsg, indexmsg);
10344 if (md.explicit_mode)
10346 as_warn ("%s", msg);
10347 if (path < md.path)
10348 as_warn (_("Only the first path encountering the conflict "
10350 as_warn_where (rs->file, rs->line,
10351 _("This is the location of the "
10352 "conflicting usage"));
10353 /* Don't bother checking other paths, to avoid duplicating
10354 the same warning */
10360 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10362 remove_marked_resource (rs);
10364 /* since the set of dependencies has changed, start over */
10365 /* FIXME -- since we're removing dvs as we go, we
10366 probably don't really need to start over... */
10379 /* Register new dependencies based on the given opcode. */
10382 mark_resources (idesc)
10383 struct ia64_opcode *idesc;
10386 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10387 int add_only_qp_reads = 0;
10389 /* A conditional branch only uses its resources if it is taken; if it is
10390 taken, we stop following that path. The other branch types effectively
10391 *always* write their resources. If it's not taken, register only QP reads.
10393 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10395 add_only_qp_reads = 1;
10399 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10401 for (i = 0; i < opdeps->nregs; i++)
10403 const struct ia64_dependency *dep;
10404 struct rsrc specs[MAX_SPECS];
10409 dep = ia64_find_dependency (opdeps->regs[i]);
10410 note = NOTE (opdeps->regs[i]);
10412 if (add_only_qp_reads
10413 && !(dep->mode == IA64_DV_WAR
10414 && (dep->specifier == IA64_RS_PR
10415 || dep->specifier == IA64_RS_PRr
10416 || dep->specifier == IA64_RS_PR63)))
10419 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10421 while (count-- > 0)
10423 mark_resource (idesc, dep, &specs[count],
10424 DEP (opdeps->regs[i]), md.path);
10427 /* The execution path may affect register values, which may in turn
10428 affect which indirect-access resources are accessed. */
10429 switch (dep->specifier)
10433 case IA64_RS_CPUID:
10441 for (path = 0; path < md.path; path++)
10443 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10444 while (count-- > 0)
10445 mark_resource (idesc, dep, &specs[count],
10446 DEP (opdeps->regs[i]), path);
10453 /* Remove dependencies when they no longer apply. */
10456 update_dependencies (idesc)
10457 struct ia64_opcode *idesc;
10461 if (strcmp (idesc->name, "srlz.i") == 0)
10463 instruction_serialization ();
10465 else if (strcmp (idesc->name, "srlz.d") == 0)
10467 data_serialization ();
10469 else if (is_interruption_or_rfi (idesc)
10470 || is_taken_branch (idesc))
10472 /* Although technically the taken branch doesn't clear dependencies
10473 which require a srlz.[id], we don't follow the branch; the next
10474 instruction is assumed to start with a clean slate. */
10478 else if (is_conditional_branch (idesc)
10479 && CURR_SLOT.qp_regno != 0)
10481 int is_call = strstr (idesc->name, ".call") != NULL;
10483 for (i = 0; i < qp_implieslen; i++)
10485 /* If the conditional branch's predicate is implied by the predicate
10486 in an existing dependency, remove that dependency. */
10487 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10490 /* Note that this implied predicate takes a branch so that if
10491 a later insn generates a DV but its predicate implies this
10492 one, we can avoid the false DV warning. */
10493 qp_implies[i].p2_branched = 1;
10494 while (depind < regdepslen)
10496 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10498 print_dependency ("Removing", depind);
10499 regdeps[depind] = regdeps[--regdepslen];
10506 /* Any marked resources which have this same predicate should be
10507 cleared, provided that the QP hasn't been modified between the
10508 marking instruction and the branch. */
10511 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10516 while (i < regdepslen)
10518 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10519 && regdeps[i].link_to_qp_branch
10520 && (regdeps[i].file != CURR_SLOT.src_file
10521 || regdeps[i].line != CURR_SLOT.src_line))
10523 /* Treat like a taken branch */
10524 print_dependency ("Removing", i);
10525 regdeps[i] = regdeps[--regdepslen];
10534 /* Examine the current instruction for dependency violations. */
10538 struct ia64_opcode *idesc;
10542 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10543 idesc->name, CURR_SLOT.src_line,
10544 idesc->dependencies->nchks,
10545 idesc->dependencies->nregs);
10548 /* Look through the list of currently marked resources; if the current
10549 instruction has the dependency in its chks list which uses that resource,
10550 check against the specific resources used. */
10551 check_dependencies (idesc);
10553 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10554 then add them to the list of marked resources. */
10555 mark_resources (idesc);
10557 /* There are several types of dependency semantics, and each has its own
10558 requirements for being cleared
10560 Instruction serialization (insns separated by interruption, rfi, or
10561 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10563 Data serialization (instruction serialization, or writer + srlz.d +
10564 reader, where writer and srlz.d are in separate groups) clears
10565 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10566 always be the case).
10568 Instruction group break (groups separated by stop, taken branch,
10569 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
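   For instance (an illustrative sketch; wr-insn/rd-insn stand for any
   writer/reader of a resource with DVS_INSTR semantics):

       wr-insn
       ;;		// stop: clears DVS_IMPLIED and DVS_IMPLIEDF
       srlz.i
       ;;
       rd-insn		// instruction serialization now clears DVS_INSTR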
10571 update_dependencies (idesc);
10573 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10574 warning. Keep track of as many as possible that are useful. */
10575 note_register_values (idesc);
10577 /* We don't need or want this anymore. */
10578 md.mem_offset.hint = 0;
10583 /* Translate one line of assembly. Pseudo ops and labels do not show here.
10589 char *saved_input_line_pointer, *mnemonic;
10590 const struct pseudo_opcode *pdesc;
10591 struct ia64_opcode *idesc;
10592 unsigned char qp_regno;
10593 unsigned int flags;
10596 saved_input_line_pointer = input_line_pointer;
10597 input_line_pointer = str;
10599 /* extract the opcode (mnemonic): */
10601 mnemonic = input_line_pointer;
10602 ch = get_symbol_end ();
10603 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10606 *input_line_pointer = ch;
10607 (*pdesc->handler) (pdesc->arg);
10611 /* Find the instruction descriptor matching the arguments. */
10613 idesc = ia64_find_opcode (mnemonic);
10614 *input_line_pointer = ch;
10617 as_bad ("Unknown opcode `%s'", mnemonic);
10621 idesc = parse_operands (idesc);
10625 /* Handle the dynamic ops we can handle now: */
10626 if (idesc->type == IA64_TYPE_DYN)
10628 if (strcmp (idesc->name, "add") == 0)
10630 if (CURR_SLOT.opnd[2].X_op == O_register
10631 && CURR_SLOT.opnd[2].X_add_number < 4)
10635 ia64_free_opcode (idesc);
10636 idesc = ia64_find_opcode (mnemonic);
10638 else if (strcmp (idesc->name, "mov") == 0)
10640 enum ia64_opnd opnd1, opnd2;
10643 opnd1 = idesc->operands[0];
10644 opnd2 = idesc->operands[1];
10645 if (opnd1 == IA64_OPND_AR3)
10647 else if (opnd2 == IA64_OPND_AR3)
10651 if (CURR_SLOT.opnd[rop].X_op == O_register)
10653 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10654 mnemonic = "mov.i";
10655 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10656 mnemonic = "mov.m";
10664 ia64_free_opcode (idesc);
10665 idesc = ia64_find_opcode (mnemonic);
10666 while (idesc != NULL
10667 && (idesc->operands[0] != opnd1
10668 || idesc->operands[1] != opnd2))
10669 idesc = get_next_opcode (idesc);
10673 else if (strcmp (idesc->name, "mov.i") == 0
10674 || strcmp (idesc->name, "mov.m") == 0)
10676 enum ia64_opnd opnd1, opnd2;
10679 opnd1 = idesc->operands[0];
10680 opnd2 = idesc->operands[1];
10681 if (opnd1 == IA64_OPND_AR3)
10683 else if (opnd2 == IA64_OPND_AR3)
10687 if (CURR_SLOT.opnd[rop].X_op == O_register)
10690 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10692 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10694 if (unit != 'a' && unit != idesc->name [4])
10695 as_bad ("AR %d cannot be accessed by %c-unit",
10696 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10700 else if (strcmp (idesc->name, "hint.b") == 0)
10706 case hint_b_warning:
10707 as_warn ("hint.b may be treated as nop");
10710 as_bad ("hint.b shouldn't be used");
10716 if (md.qp.X_op == O_register)
10718 qp_regno = md.qp.X_add_number - REG_P;
10719 md.qp.X_op = O_absent;
10722 flags = idesc->flags;
10724 if ((flags & IA64_OPCODE_FIRST) != 0)
10726 /* The alignment frag has to end with a stop bit only if the
10727 next instruction after the alignment directive has to be
10728 the first instruction in an instruction group. */
10731 while (align_frag->fr_type != rs_align_code)
10733 align_frag = align_frag->fr_next;
10737 /* align_frag can be NULL if there are directives in between.
10739 if (align_frag && align_frag->fr_next == frag_now)
10740 align_frag->tc_frag_data = 1;
10743 insn_group_break (1, 0, 0);
10747 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10749 as_bad ("`%s' cannot be predicated", idesc->name);
10753 /* Build the instruction. */
10754 CURR_SLOT.qp_regno = qp_regno;
10755 CURR_SLOT.idesc = idesc;
10756 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
10757 dwarf2_where (&CURR_SLOT.debug_line);
10759 /* Add unwind entry, if there is one. */
10760 if (unwind.current_entry)
10762 CURR_SLOT.unwind_record = unwind.current_entry;
10763 unwind.current_entry = NULL;
10765 if (unwind.proc_start && S_IS_DEFINED (unwind.proc_start))
10768 /* Check for dependency violations. */
10772 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10773 if (++md.num_slots_in_use >= NUM_SLOTS)
10774 emit_one_bundle ();
10776 if ((flags & IA64_OPCODE_LAST) != 0)
10777 insn_group_break (1, 0, 0);
10779 md.last_text_seg = now_seg;
10782 input_line_pointer = saved_input_line_pointer;
10785 /* Called when symbol NAME cannot be found in the symbol table.
10786 Should be used for dynamic valued symbols only. */
10789 md_undefined_symbol (name)
10790 char *name ATTRIBUTE_UNUSED;
10795 /* Called for any expression that cannot be recognized. When the
10796 function is called, `input_line_pointer' will point to the start of the expression.
10803 switch (*input_line_pointer)
10806 ++input_line_pointer;
10808 if (*input_line_pointer != ']')
10810 as_bad ("Closing bracket missing");
10815 if (e->X_op != O_register)
10816 as_bad ("Register expected as index");
10818 ++input_line_pointer;
10829 ignore_rest_of_line ();
10832 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10833 a section symbol plus some offset. For relocs involving @fptr()
10834 directives, we don't want such adjustments since we need to have the
10835 original symbol's name in the reloc. */
10837 ia64_fix_adjustable (fix)
10840 /* Prevent all adjustments to global symbols */
10841 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10844 switch (fix->fx_r_type)
10846 case BFD_RELOC_IA64_FPTR64I:
10847 case BFD_RELOC_IA64_FPTR32MSB:
10848 case BFD_RELOC_IA64_FPTR32LSB:
10849 case BFD_RELOC_IA64_FPTR64MSB:
10850 case BFD_RELOC_IA64_FPTR64LSB:
10851 case BFD_RELOC_IA64_LTOFF_FPTR22:
10852 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10862 ia64_force_relocation (fix)
10865 switch (fix->fx_r_type)
10867 case BFD_RELOC_IA64_FPTR64I:
10868 case BFD_RELOC_IA64_FPTR32MSB:
10869 case BFD_RELOC_IA64_FPTR32LSB:
10870 case BFD_RELOC_IA64_FPTR64MSB:
10871 case BFD_RELOC_IA64_FPTR64LSB:
10873 case BFD_RELOC_IA64_LTOFF22:
10874 case BFD_RELOC_IA64_LTOFF64I:
10875 case BFD_RELOC_IA64_LTOFF_FPTR22:
10876 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10877 case BFD_RELOC_IA64_PLTOFF22:
10878 case BFD_RELOC_IA64_PLTOFF64I:
10879 case BFD_RELOC_IA64_PLTOFF64MSB:
10880 case BFD_RELOC_IA64_PLTOFF64LSB:
10882 case BFD_RELOC_IA64_LTOFF22X:
10883 case BFD_RELOC_IA64_LDXMOV:
10890 return generic_force_reloc (fix);
10893 /* Decide from what point a pc-relative relocation is relative to,
10894 relative to the pc-relative fixup. Er, relatively speaking. */
10896 ia64_pcrel_from_section (fix, sec)
10900 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10902 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
10909 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10911 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10915 expr.X_op = O_pseudo_fixup;
10916 expr.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10917 expr.X_add_number = 0;
10918 expr.X_add_symbol = symbol;
10919 emit_expr (&expr, size);
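/* Usage sketch (illustrative, not taken from the sources): emitting a
   4-byte section-relative reference to a hypothetical, already-defined
   label `.Ldebug_start' would amount to

     ia64_dwarf2_emit_offset (symbol_find (".Ldebug_start"), 4);

   The FUNC_SEC_RELATIVE pseudo-fixup built above is later mapped to a
   SECREL32LSB/SECREL32MSB reloc by ia64_gen_real_reloc_type.  */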
10922 /* This is called whenever some data item (not an instruction) needs a
10923 fixup. We pick the right reloc code depending on the byteorder
10924 currently in effect. */
10926 ia64_cons_fix_new (f, where, nbytes, exp)
10932 bfd_reloc_code_real_type code;
10937 /* There are no relocs for 8 and 16 bit quantities, but we allow
10938 them here since they will work fine as long as the expression
10939 is fully defined at the end of the pass over the source file. */
10940 case 1: code = BFD_RELOC_8; break;
10941 case 2: code = BFD_RELOC_16; break;
10943 if (target_big_endian)
10944 code = BFD_RELOC_IA64_DIR32MSB;
10946 code = BFD_RELOC_IA64_DIR32LSB;
10950 /* In 32-bit mode, data8 could mean function descriptors too. */
10951 if (exp->X_op == O_pseudo_fixup
10952 && exp->X_op_symbol
10953 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
10954 && !(md.flags & EF_IA_64_ABI64))
10956 if (target_big_endian)
10957 code = BFD_RELOC_IA64_IPLTMSB;
10959 code = BFD_RELOC_IA64_IPLTLSB;
10960 exp->X_op = O_symbol;
10965 if (target_big_endian)
10966 code = BFD_RELOC_IA64_DIR64MSB;
10968 code = BFD_RELOC_IA64_DIR64LSB;
10973 if (exp->X_op == O_pseudo_fixup
10974 && exp->X_op_symbol
10975 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
10977 if (target_big_endian)
10978 code = BFD_RELOC_IA64_IPLTMSB;
10980 code = BFD_RELOC_IA64_IPLTLSB;
10981 exp->X_op = O_symbol;
10987 as_bad ("Unsupported fixup size %d", nbytes);
10988 ignore_rest_of_line ();
10992 if (exp->X_op == O_pseudo_fixup)
10994 exp->X_op = O_symbol;
10995 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
10996 /* ??? If the code comes back unchanged, the pseudo reloc is unsupported here. */
10999 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11000 /* We need to store the byte order in effect in case we're going
11001 to fix an 8 or 16 bit relocation (for which there are no real
11002 relocs available). See md_apply_fix3(). */
11003 fix->tc_fix_data.bigendian = target_big_endian;
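/* Minimal standalone sketch (an illustration, not code used by GAS) of
   the byte-order selection performed above for plain 4- and 8-byte data;
   the real routine additionally handles 1- and 2-byte data and the
   data8 @iplt() special case.  */

static bfd_reloc_code_real_type ATTRIBUTE_UNUSED
ia64_sketch_dir_reloc (int nbytes, int big_endian)
{
  if (nbytes == 4)
    return big_endian ? BFD_RELOC_IA64_DIR32MSB : BFD_RELOC_IA64_DIR32LSB;
  /* Assume nbytes == 8 here.  */
  return big_endian ? BFD_RELOC_IA64_DIR64MSB : BFD_RELOC_IA64_DIR64LSB;
}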
11006 /* Return the actual relocation we wish to associate with the pseudo
11007 reloc described by SYM and R_TYPE. SYM should be one of the
11008 symbols in the pseudo_func array, or NULL. */
11010 static bfd_reloc_code_real_type
11011 ia64_gen_real_reloc_type (sym, r_type)
11012 struct symbol *sym;
11013 bfd_reloc_code_real_type r_type;
11015 bfd_reloc_code_real_type new = 0;
11016 const char *type = NULL, *suffix = "";
11023 switch (S_GET_VALUE (sym))
11025 case FUNC_FPTR_RELATIVE:
11028 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
11029 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
11030 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
11031 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
11032 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
11033 default: type = "FPTR"; break;
11037 case FUNC_GP_RELATIVE:
11040 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
11041 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
11042 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
11043 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
11044 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
11045 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
11046 default: type = "GPREL"; break;
11050 case FUNC_LT_RELATIVE:
11053 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
11054 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
11055 default: type = "LTOFF"; break;
11059 case FUNC_LT_RELATIVE_X:
11062 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22X; break;
11063 default: type = "LTOFF"; suffix = "X"; break;
11067 case FUNC_PC_RELATIVE:
11070 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
11071 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
11072 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
11073 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
11074 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
11075 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
11076 default: type = "PCREL"; break;
11080 case FUNC_PLT_RELATIVE:
11083 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
11084 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
11085 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
11086 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
11087 default: type = "PLTOFF"; break;
11091 case FUNC_SEC_RELATIVE:
11094 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
11095 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
11096 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
11097 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
11098 default: type = "SECREL"; break;
11102 case FUNC_SEG_RELATIVE:
11105 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
11106 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
11107 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
11108 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
11109 default: type = "SEGREL"; break;
11113 case FUNC_LTV_RELATIVE:
11116 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
11117 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
11118 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
11119 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
11120 default: type = "LTV"; break;
11124 case FUNC_LT_FPTR_RELATIVE:
11127 case BFD_RELOC_IA64_IMM22:
11128 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11129 case BFD_RELOC_IA64_IMM64:
11130 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11131 case BFD_RELOC_IA64_DIR32MSB:
11132 new = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11133 case BFD_RELOC_IA64_DIR32LSB:
11134 new = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11135 case BFD_RELOC_IA64_DIR64MSB:
11136 new = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11137 case BFD_RELOC_IA64_DIR64LSB:
11138 new = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11140 type = "LTOFF_FPTR"; break;
11144 case FUNC_TP_RELATIVE:
11147 case BFD_RELOC_IA64_IMM14: new = BFD_RELOC_IA64_TPREL14; break;
11148 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_TPREL22; break;
11149 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_TPREL64I; break;
11150 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_TPREL64MSB; break;
11151 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_TPREL64LSB; break;
11152 default: type = "TPREL"; break;
11156 case FUNC_LT_TP_RELATIVE:
11159 case BFD_RELOC_IA64_IMM22:
11160 new = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11162 type = "LTOFF_TPREL"; break;
11166 case FUNC_DTP_MODULE:
11169 case BFD_RELOC_IA64_DIR64MSB:
11170 new = BFD_RELOC_IA64_DTPMOD64MSB; break;
11171 case BFD_RELOC_IA64_DIR64LSB:
11172 new = BFD_RELOC_IA64_DTPMOD64LSB; break;
11174 type = "DTPMOD"; break;
11178 case FUNC_LT_DTP_MODULE:
11181 case BFD_RELOC_IA64_IMM22:
11182 new = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11184 type = "LTOFF_DTPMOD"; break;
11188 case FUNC_DTP_RELATIVE:
11191 case BFD_RELOC_IA64_DIR32MSB:
11192 new = BFD_RELOC_IA64_DTPREL32MSB; break;
11193 case BFD_RELOC_IA64_DIR32LSB:
11194 new = BFD_RELOC_IA64_DTPREL32LSB; break;
11195 case BFD_RELOC_IA64_DIR64MSB:
11196 new = BFD_RELOC_IA64_DTPREL64MSB; break;
11197 case BFD_RELOC_IA64_DIR64LSB:
11198 new = BFD_RELOC_IA64_DTPREL64LSB; break;
11199 case BFD_RELOC_IA64_IMM14:
11200 new = BFD_RELOC_IA64_DTPREL14; break;
11201 case BFD_RELOC_IA64_IMM22:
11202 new = BFD_RELOC_IA64_DTPREL22; break;
11203 case BFD_RELOC_IA64_IMM64:
11204 new = BFD_RELOC_IA64_DTPREL64I; break;
11206 type = "DTPREL"; break;
11210 case FUNC_LT_DTP_RELATIVE:
11213 case BFD_RELOC_IA64_IMM22:
11214 new = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11216 type = "LTOFF_DTPREL"; break;
11220 case FUNC_IPLT_RELOC:
11223 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11224 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11225 default: type = "IPLT"; break;
11243 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11244 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11245 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11246 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11247 case BFD_RELOC_IA64_IMM14: width = 14; break;
11248 case BFD_RELOC_IA64_IMM22: width = 22; break;
11249 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11253 /* This should be an error, but since previously there wasn't any
11254 diagnostic here, don't make it fail because of this for now. */
11255 as_warn ("Cannot express %s%d%s relocation", type, width, suffix);
11260 /* Here is where we generate the appropriate reloc for pseudo relocation functions. */
11263 ia64_validate_fix (fix)
11266 switch (fix->fx_r_type)
11268 case BFD_RELOC_IA64_FPTR64I:
11269 case BFD_RELOC_IA64_FPTR32MSB:
11270 case BFD_RELOC_IA64_FPTR64LSB:
11271 case BFD_RELOC_IA64_LTOFF_FPTR22:
11272 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11273 if (fix->fx_offset != 0)
11274 as_bad_where (fix->fx_file, fix->fx_line,
11275 "No addend allowed in @fptr() relocation");
11283 fix_insn (fix, odesc, value)
11285 const struct ia64_operand *odesc;
11288 bfd_vma insn[3], t0, t1, control_bits;
11293 slot = fix->fx_where & 0x3;
11294 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11296 /* Bundles are always in little-endian byte order. */
11297 t0 = bfd_getl64 (fixpos);
11298 t1 = bfd_getl64 (fixpos + 8);
11299 control_bits = t0 & 0x1f;
11300 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11301 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11302 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11305 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11307 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11308 insn[2] |= (((value & 0x7f) << 13)
11309 | (((value >> 7) & 0x1ff) << 27)
11310 | (((value >> 16) & 0x1f) << 22)
11311 | (((value >> 21) & 0x1) << 21)
11312 | (((value >> 63) & 0x1) << 36));
11314 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11316 if (value & ~0x3fffffffffffffffULL)
11317 err = "integer operand out of range";
11318 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11319 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11321 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11324 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11325 insn[2] |= ((((value >> 59) & 0x1) << 36)
11326 | (((value >> 0) & 0xfffff) << 13));
11329 err = (*odesc->insert) (odesc, value, insn + slot);
11332 as_bad_where (fix->fx_file, fix->fx_line, err);
11334 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11335 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11336 number_to_chars_littleendian (fixpos + 0, t0, 8);
11337 number_to_chars_littleendian (fixpos + 8, t1, 8);
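/* Standalone sketch (an illustration only, not code used by the
   assembler) of the bundle layout manipulated above: an IA-64 bundle is
   128 bits, stored as two little-endian 64-bit words T0/T1 and holding a
   5-bit template plus three 41-bit instruction slots.  Each slot value is
   assumed to fit in 41 bits.  */

struct ia64_bundle_sketch
{
  unsigned long long template_bits;	/* bundle bits 0..4 */
  unsigned long long slot[3];		/* bundle bits 5..45, 46..86, 87..127 */
};

static void ATTRIBUTE_UNUSED
sketch_unpack_bundle (unsigned long long t0, unsigned long long t1,
		      struct ia64_bundle_sketch *b)
{
  b->template_bits = t0 & 0x1f;
  b->slot[0] = (t0 >> 5) & 0x1ffffffffffULL;
  b->slot[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
  b->slot[2] = (t1 >> 23) & 0x1ffffffffffULL;
}

static void ATTRIBUTE_UNUSED
sketch_pack_bundle (const struct ia64_bundle_sketch *b,
		    unsigned long long *t0, unsigned long long *t1)
{
  *t0 = b->template_bits | (b->slot[0] << 5) | (b->slot[1] << 46);
  *t1 = ((b->slot[1] >> 18) & 0x7fffff) | (b->slot[2] << 23);
}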
11340 /* Attempt to simplify or even eliminate a fixup. The return value is
11341 ignored; perhaps it was once meaningful, but now it is historical.
11342 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11344 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11348 md_apply_fix3 (fix, valP, seg)
11351 segT seg ATTRIBUTE_UNUSED;
11354 valueT value = *valP;
11356 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11360 switch (fix->fx_r_type)
11362 case BFD_RELOC_IA64_PCREL21B: break;
11363 case BFD_RELOC_IA64_PCREL21BI: break;
11364 case BFD_RELOC_IA64_PCREL21F: break;
11365 case BFD_RELOC_IA64_PCREL21M: break;
11366 case BFD_RELOC_IA64_PCREL60B: break;
11367 case BFD_RELOC_IA64_PCREL22: break;
11368 case BFD_RELOC_IA64_PCREL64I: break;
11369 case BFD_RELOC_IA64_PCREL32MSB: break;
11370 case BFD_RELOC_IA64_PCREL32LSB: break;
11371 case BFD_RELOC_IA64_PCREL64MSB: break;
11372 case BFD_RELOC_IA64_PCREL64LSB: break;
11374 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11381 switch (fix->fx_r_type)
11383 case BFD_RELOC_UNUSED:
11384 /* This must be a TAG13 or TAG13b operand. There are no external
11385 relocs defined for them, so we must give an error. */
11386 as_bad_where (fix->fx_file, fix->fx_line,
11387 "%s must have a constant value",
11388 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11392 case BFD_RELOC_IA64_TPREL14:
11393 case BFD_RELOC_IA64_TPREL22:
11394 case BFD_RELOC_IA64_TPREL64I:
11395 case BFD_RELOC_IA64_LTOFF_TPREL22:
11396 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11397 case BFD_RELOC_IA64_DTPREL14:
11398 case BFD_RELOC_IA64_DTPREL22:
11399 case BFD_RELOC_IA64_DTPREL64I:
11400 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11401 S_SET_THREAD_LOCAL (fix->fx_addsy);
11408 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11410 if (fix->tc_fix_data.bigendian)
11411 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11413 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11418 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11423 /* Generate the BFD reloc to be stuck in the object file from the
11424 fixup used internally in the assembler. */
11427 tc_gen_reloc (sec, fixp)
11428 asection *sec ATTRIBUTE_UNUSED;
11433 reloc = xmalloc (sizeof (*reloc));
11434 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
11435 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11436 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11437 reloc->addend = fixp->fx_offset;
11438 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11442 as_bad_where (fixp->fx_file, fixp->fx_line,
11443 "Cannot represent %s relocation in object file",
11444 bfd_get_reloc_code_name (fixp->fx_r_type));
11449 /* Turn a string in input_line_pointer into a floating point constant
11450 of type TYPE, and store the appropriate bytes in *LIT. The number
11451 of LITTLENUMS emitted is stored in *SIZE. An error message is
11452 returned, or NULL on OK. */
11454 #define MAX_LITTLENUMS 5
11457 md_atof (type, lit, size)
11462 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11492 return "Bad call to MD_ATOF()";
11494 t = atof_ieee (input_line_pointer, type, words);
11496 input_line_pointer = t;
11498 (*ia64_float_to_chars) (lit, words, prec);
11502 /* It is 10 byte floating point with 6 byte padding. */
11503 memset (&lit [10], 0, 6);
11504 *size = 8 * sizeof (LITTLENUM_TYPE);
11507 *size = prec * sizeof (LITTLENUM_TYPE);
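/* Worked size example (arithmetic only, restating the code above): an
   80-bit extended value comes back from atof_ieee as prec == 5
   LITTLENUM_TYPEs, i.e. 10 bytes.  When the padding branch above is taken,
   bytes 10..15 of *LIT are zeroed and *SIZE becomes
   8 * sizeof (LITTLENUM_TYPE) == 16; otherwise *SIZE is simply
   prec * sizeof (LITTLENUM_TYPE).  */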
11512 /* Handle ia64 specific semantics of the align directive. */
11515 ia64_md_do_align (n, fill, len, max)
11516 int n ATTRIBUTE_UNUSED;
11517 const char *fill ATTRIBUTE_UNUSED;
11518 int len ATTRIBUTE_UNUSED;
11519 int max ATTRIBUTE_UNUSED;
11521 if (subseg_text_p (now_seg))
11522 ia64_flush_insns ();
11525 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11526 of an rs_align_code fragment. */
11529 ia64_handle_align (fragp)
11534 const unsigned char *nop;
11536 if (fragp->fr_type != rs_align_code)
11539 /* Check if this frag has to end with a stop bit. */
11540 nop = fragp->tc_frag_data ? le_nop_stop : le_nop;
11542 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11543 p = fragp->fr_literal + fragp->fr_fix;
11545 /* If no padding is needed, we check whether we need a stop bit. */
11546 if (!bytes && fragp->tc_frag_data)
11548 if (fragp->fr_fix < 16)
11550 /* FIXME: It won't work with
11552 alloc r32=ar.pfs,1,2,4,0
11556 as_bad_where (fragp->fr_file, fragp->fr_line,
11557 _("Can't add stop bit to mark end of instruction group"));
11560 /* Bundles are always in little-endian byte order. Make sure
11561 the previous bundle has the stop bit. */
11565 /* Make sure we are on a 16-byte boundary, in case someone has been
11566 putting data into a text section. */
11569 int fix = bytes & 15;
11570 memset (p, 0, fix);
11573 fragp->fr_fix += fix;
11576 /* Instruction bundles are always little-endian. */
11577 memcpy (p, nop, 16);
11578 fragp->fr_var = 16;
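/* Worked example (arithmetic only, not from the sources): if the frag
   needs bytes == 23 of fill, the first (23 & 15) == 7 bytes are zeroed to
   restore 16-byte alignment and are added to fr_fix; the remaining space
   is then covered by the 16-byte nop (or nop-plus-stop-bit) bundle copied
   above, which write.c repeats as needed since fr_var == 16.  */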
11582 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11587 number_to_chars_bigendian (lit, (long) (*words++),
11588 sizeof (LITTLENUM_TYPE));
11589 lit += sizeof (LITTLENUM_TYPE);
11594 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11599 number_to_chars_littleendian (lit, (long) (words[prec]),
11600 sizeof (LITTLENUM_TYPE));
11601 lit += sizeof (LITTLENUM_TYPE);
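/* Note (illustrative): atof_ieee returns the LITTLENUMs most-significant
   first, so this little-endian variant walks the array backwards while
   emitting each 16-bit LITTLENUM least-significant byte first; the
   big-endian variant above copies the array in order.  */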
11606 ia64_elf_section_change_hook (void)
11608 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11609 && elf_linked_to_section (now_seg) == NULL)
11610 elf_linked_to_section (now_seg) = text_section;
11611 dot_byteorder (-1);
11614 /* Check if a label should be made global. */
11616 ia64_check_label (symbolS *label)
11618 if (*input_line_pointer == ':')
11620 S_SET_EXTERNAL (label);
11621 input_line_pointer++;
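/* Example (illustrative): a label written as `entry::' arrives here with
   input_line_pointer at the second colon, so `entry' is marked external;
   this gives the usual double-colon shorthand for defining a global
   label.  */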
11625 /* Used to remember where .alias and .secalias directives are seen. We
11626 will rename symbol and section names when we are about to output
11627 the relocatable file. */
11630 char *file; /* The file where the directive is seen. */
11631 unsigned int line; /* The line number the directive is at. */
11632 const char *name; /* The original name of the symbol. */
11635 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11636 .secalias. Otherwise, it is .alias. */
11638 dot_alias (int section)
11640 char *name, *alias;
11644 const char *error_string;
11647 struct hash_control *ahash, *nhash;
11650 name = input_line_pointer;
11651 delim = get_symbol_end ();
11652 end_name = input_line_pointer;
11655 if (name == end_name)
11657 as_bad (_("expected symbol name"));
11658 discard_rest_of_line ();
11662 SKIP_WHITESPACE ();
11664 if (*input_line_pointer != ',')
11667 as_bad (_("expected comma after \"%s\""), name);
11669 ignore_rest_of_line ();
11673 input_line_pointer++;
11675 ia64_canonicalize_symbol_name (name);
11677 /* We call demand_copy_C_string to check if the alias string is valid.
11678 There should be a closing `"' and no `\0' in the string. */
11679 alias = demand_copy_C_string (&len);
11682 ignore_rest_of_line ();
11686 /* Make a copy of the name string. */
11687 len = strlen (name) + 1;
11688 obstack_grow (¬es, name, len);
11689 name = obstack_finish (¬es);
11694 ahash = secalias_hash;
11695 nhash = secalias_name_hash;
11700 ahash = alias_hash;
11701 nhash = alias_name_hash;
11704 /* Check if alias has been used before. */
11705 h = (struct alias *) hash_find (ahash, alias);
11708 if (strcmp (h->name, name))
11709 as_bad (_("`%s' is already the alias of %s `%s'"),
11710 alias, kind, h->name);
11714 /* Check if name already has an alias. */
11715 a = (const char *) hash_find (nhash, name);
11718 if (strcmp (a, alias))
11719 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11723 h = (struct alias *) xmalloc (sizeof (struct alias));
11724 as_where (&h->file, &h->line);
11727 error_string = hash_jam (ahash, alias, (PTR) h);
11730 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11731 alias, kind, error_string);
11735 error_string = hash_jam (nhash, name, (PTR) alias);
11738 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11739 alias, kind, error_string);
11741 obstack_free (¬es, name);
11742 obstack_free (¬es, alias);
11745 demand_empty_rest_of_line ();
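/* Directive usage sketch (hypothetical input, consistent with the parsing
   above):

	.alias	  foo, "external*name"
	.secalias .data, "renamed$data"

   Only the pairing is recorded here; the actual renaming is done by
   do_alias/do_secalias below when the object file is written out.  */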
11748 /* It renames the original symbol name to its alias. */
11750 do_alias (const char *alias, PTR value)
11752 struct alias *h = (struct alias *) value;
11753 symbolS *sym = symbol_find (h->name);
11756 as_warn_where (h->file, h->line,
11757 _("symbol `%s' aliased to `%s' is not used"),
11760 S_SET_NAME (sym, (char *) alias);
11763 /* Called from write_object_file. */
11765 ia64_adjust_symtab (void)
11767 hash_traverse (alias_hash, do_alias);
11770 /* It renames the original section name to its alias. */
11772 do_secalias (const char *alias, PTR value)
11774 struct alias *h = (struct alias *) value;
11775 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11778 as_warn_where (h->file, h->line,
11779 _("section `%s' aliased to `%s' is not used"),
11785 /* Called from write_object_file. */
11787 ia64_frob_file (void)
11789 hash_traverse (secalias_hash, do_secalias);