1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998, 1999, 2000 Free Software Foundation.
5 This file is part of GAS, the GNU Assembler.
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
35 - labels are wrong if automatic alignment is introduced
36 (e.g., check out the second real10 definition in test-data.s)
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
46 #include "dwarf2dbg.h"
49 #include "opcode/ia64.h"
53 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
54 #define MIN(a,b) ((a) < (b) ? (a) : (b))
57 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
58 #define CURR_SLOT md.slot[md.curr_slot]
60 #define O_pseudo_fixup (O_max + 1)
64 SPECIAL_SECTION_BSS = 0,
66 SPECIAL_SECTION_SDATA,
67 SPECIAL_SECTION_RODATA,
68 SPECIAL_SECTION_COMMENT,
69 SPECIAL_SECTION_UNWIND,
70 SPECIAL_SECTION_UNWIND_INFO
83 FUNC_LT_FPTR_RELATIVE,
89 REG_FR = (REG_GR + 128),
90 REG_AR = (REG_FR + 128),
91 REG_CR = (REG_AR + 128),
92 REG_P = (REG_CR + 128),
93 REG_BR = (REG_P + 64),
94 REG_IP = (REG_BR + 8),
101 /* The following are pseudo-registers for use by gas only. */
113 /* The following pseudo-registers are used for unwind directives only: */
121 DYNREG_GR = 0, /* dynamic general purpose register */
122 DYNREG_FR, /* dynamic floating point register */
123 DYNREG_PR, /* dynamic predicate register */
127 /* On the ia64, we can't know the address of a text label until the
128 instructions are packed into a bundle. To handle this, we keep
129 track of the list of labels that appear in front of each instruction. */
133 struct label_fix *next;
137 extern int target_big_endian;
139 /* Characters which always start a comment. */
140 const char comment_chars[] = "";
142 /* Characters which start a comment at the beginning of a line. */
143 const char line_comment_chars[] = "#";
145 /* Characters which may be used to separate multiple commands on a single line. */
147 const char line_separator_chars[] = ";";
149 /* Characters which are used to indicate an exponent in a floating point number. */
151 const char EXP_CHARS[] = "eE";
153 /* Characters which mean that a number is a floating point constant, as in 0d1.0. */
155 const char FLT_CHARS[] = "rRsSfFdDxXpP";
157 /* ia64-specific option processing: */
159 const char *md_shortopts = "m:N:x::";
161 struct option md_longopts[] =
163 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
164 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
165 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
166 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
169 size_t md_longopts_size = sizeof (md_longopts);
173 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
174 struct hash_control *reg_hash; /* register name hash table */
175 struct hash_control *dynreg_hash; /* dynamic register hash table */
176 struct hash_control *const_hash; /* constant hash table */
177 struct hash_control *entry_hash; /* code entry hint hash table */
179 symbolS *regsym[REG_NUM];
181 /* If X_op is != O_absent, the register name for the instruction's
182 qualifying predicate. If NULL, p0 is assumed for instructions
183 that are predicable. */
190 explicit_mode : 1, /* which mode we're in */
191 default_explicit_mode : 1, /* which mode is the default */
192 mode_explicitly_set : 1, /* was the current mode explicitly set? */
195 /* Each bundle consists of up to three instructions. We keep
196 track of four most recent instructions so we can correctly set
197 the end_of_insn_group for the last instruction in a bundle. */
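/* Illustrative note (not in the original comment): whether an insn closes
   an instruction group is only known once the following insn or an
   explicit stop bit (";;") has been seen, which is why one slot beyond
   the three that fit in a bundle is kept around.  */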
199 int num_slots_in_use;
203 end_of_insn_group : 1,
204 manual_bundling_on : 1,
205 manual_bundling_off : 1;
206 signed char user_template; /* user-selected template, if any */
207 unsigned char qp_regno; /* qualifying predicate */
208 /* This duplicates a good fraction of "struct fix" but we
209 can't use a "struct fix" instead since we can't call
210 fix_new_exp() until we know the address of the instruction. */
214 bfd_reloc_code_real_type code;
215 enum ia64_opnd opnd; /* type of operand in need of fix */
216 unsigned int is_pcrel : 1; /* is operand pc-relative? */
217 expressionS expr; /* the value to be inserted */
219 fixup[2]; /* at most two fixups per insn */
220 struct ia64_opcode *idesc;
221 struct label_fix *label_fixups;
222 struct label_fix *tag_fixups;
223 struct unw_rec_list *unwind_record; /* Unwind directive. */
226 unsigned int src_line;
227 struct dwarf2_line_info debug_line;
235 struct dynreg *next; /* next dynamic register */
237 unsigned short base; /* the base register number */
238 unsigned short num_regs; /* # of registers in this set */
240 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
242 flagword flags; /* ELF-header flags */
245 unsigned hint:1; /* is this hint currently valid? */
246 bfd_vma offset; /* mem.offset offset */
247 bfd_vma base; /* mem.offset base */
250 int path; /* number of alt. entry points seen */
251 const char **entry_labels; /* labels of all alternate paths in
252 the current DV-checking block. */
253 int maxpaths; /* size currently allocated for
255 /* Support for hardware errata workarounds. */
257 /* Record data about the last three insn groups. */
260 /* B-step workaround.
261 For each predicate register, this is set if the corresponding insn
262 group conditionally sets this register with one of the affected
265 /* B-step workaround.
266 For each general register, this is set if the corresponding insn
267 a) is conditional on one of the predicate registers for which
268 P_REG_SET is 1 in the corresponding entry of the previous group,
269 b) sets this general register with one of the affected
271 int g_reg_set_conditionally[128];
277 /* application registers: */
283 #define AR_BSPSTORE 18
298 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
299 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
300 {"ar.rsc", 16}, {"ar.bsp", 17},
301 {"ar.bspstore", 18}, {"ar.rnat", 19},
302 {"ar.fcr", 21}, {"ar.eflag", 24},
303 {"ar.csd", 25}, {"ar.ssd", 26},
304 {"ar.cflg", 27}, {"ar.fsr", 28},
305 {"ar.fir", 29}, {"ar.fdr", 30},
306 {"ar.ccv", 32}, {"ar.unat", 36},
307 {"ar.fpsr", 40}, {"ar.itc", 44},
308 {"ar.pfs", 64}, {"ar.lc", 65},
329 /* control registers: */
371 static const struct const_desc
378 /* PSR constant masks: */
381 {"psr.be", ((valueT) 1) << 1},
382 {"psr.up", ((valueT) 1) << 2},
383 {"psr.ac", ((valueT) 1) << 3},
384 {"psr.mfl", ((valueT) 1) << 4},
385 {"psr.mfh", ((valueT) 1) << 5},
387 {"psr.ic", ((valueT) 1) << 13},
388 {"psr.i", ((valueT) 1) << 14},
389 {"psr.pk", ((valueT) 1) << 15},
391 {"psr.dt", ((valueT) 1) << 17},
392 {"psr.dfl", ((valueT) 1) << 18},
393 {"psr.dfh", ((valueT) 1) << 19},
394 {"psr.sp", ((valueT) 1) << 20},
395 {"psr.pp", ((valueT) 1) << 21},
396 {"psr.di", ((valueT) 1) << 22},
397 {"psr.si", ((valueT) 1) << 23},
398 {"psr.db", ((valueT) 1) << 24},
399 {"psr.lp", ((valueT) 1) << 25},
400 {"psr.tb", ((valueT) 1) << 26},
401 {"psr.rt", ((valueT) 1) << 27},
402 /* 28-31: reserved */
403 /* 32-33: cpl (current privilege level) */
404 {"psr.is", ((valueT) 1) << 34},
405 {"psr.mc", ((valueT) 1) << 35},
406 {"psr.it", ((valueT) 1) << 36},
407 {"psr.id", ((valueT) 1) << 37},
408 {"psr.da", ((valueT) 1) << 38},
409 {"psr.dd", ((valueT) 1) << 39},
410 {"psr.ss", ((valueT) 1) << 40},
411 /* 41-42: ri (restart instruction) */
412 {"psr.ed", ((valueT) 1) << 43},
413 {"psr.bn", ((valueT) 1) << 44},
416 /* indirect register-sets/memory: */
425 { "CPUID", IND_CPUID },
426 { "cpuid", IND_CPUID },
438 /* Pseudo functions used to indicate relocation types; these functions
439 start with an at sign (@). */
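/* Illustrative usage (a sketch, not from the original source): in IA-64
   assembly these appear as, e.g.,

	addl r14 = @gprel(sym), gp
	addl r15 = @ltoff(sym), gp

   and the expression parser turns them into O_pseudo_fixup expressions
   that select the matching entry ("gprel", "ltoff", ...) in the table
   below.  */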
461 /* reloc pseudo functions (these must come first!): */
462 { "fptr", PSEUDO_FUNC_RELOC },
463 { "gprel", PSEUDO_FUNC_RELOC },
464 { "ltoff", PSEUDO_FUNC_RELOC },
465 { "pcrel", PSEUDO_FUNC_RELOC },
466 { "pltoff", PSEUDO_FUNC_RELOC },
467 { "secrel", PSEUDO_FUNC_RELOC },
468 { "segrel", PSEUDO_FUNC_RELOC },
469 { "ltv", PSEUDO_FUNC_RELOC },
470 { 0, }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
472 /* mbtype4 constants: */
473 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
474 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
475 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
476 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
477 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
479 /* fclass constants: */
480 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
481 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
482 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
483 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
484 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
485 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
486 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
487 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
488 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
490 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
492 /* unwind-related constants: */
493 { "svr4", PSEUDO_FUNC_CONST, { 0 } },
494 { "hpux", PSEUDO_FUNC_CONST, { 1 } },
495 { "nt", PSEUDO_FUNC_CONST, { 2 } },
497 /* unwind-related registers: */
498 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
501 /* 41-bit nop opcodes (one per unit): */
502 static const bfd_vma nop[IA64_NUM_UNITS] =
504 0x0000000000LL, /* NIL => break 0 */
505 0x0008000000LL, /* I-unit nop */
506 0x0008000000LL, /* M-unit nop */
507 0x4000000000LL, /* B-unit nop */
508 0x0008000000LL, /* F-unit nop */
509 0x0008000000LL, /* L-"unit" nop */
510 0x0008000000LL, /* X-unit nop */
513 /* Can't be `const' as it's passed to input routines (which have the
514 habit of setting temporary sentinels). */
515 static char special_section_name[][20] =
517 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
518 {".IA_64.unwind"}, {".IA_64.unwind_info"}
521 /* The best template for a particular sequence of up to three instructions: */
523 #define N IA64_NUM_TYPES
524 static unsigned char best_template[N][N][N];
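/* For instance (a sketch; the IA64_TYPE_* indices come from opcode/ia64.h,
   included above): best_template[IA64_TYPE_M][IA64_TYPE_I][IA64_TYPE_I]
   would hold the preferred bundle template for an M, I, I sequence.  */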
527 /* Resource dependencies currently in effect */
529 int depind; /* dependency index */
530 const struct ia64_dependency *dependency; /* actual dependency */
531 unsigned specific:1, /* is this a specific bit/regno? */
532 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
533 int index; /* specific regno/bit within dependency */
534 int note; /* optional qualifying note (0 if none) */
538 int insn_srlz; /* current insn serialization state */
539 int data_srlz; /* current data serialization state */
540 int qp_regno; /* qualifying predicate for this usage */
541 char *file; /* what file marked this dependency */
542 int line; /* what line marked this dependency */
543 struct mem_offset mem_offset; /* optional memory offset hint */
544 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
545 int path; /* corresponding code entry index */
547 static int regdepslen = 0;
548 static int regdepstotlen = 0;
549 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
550 static const char *dv_sem[] = { "none", "implied", "impliedf",
551 "data", "instr", "specific", "stop", "other" };
552 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
554 /* Current state of PR mutexation */
555 static struct qpmutex {
558 } *qp_mutexes = NULL; /* QP mutex bitmasks */
559 static int qp_mutexeslen = 0;
560 static int qp_mutexestotlen = 0;
561 static valueT qp_safe_across_calls = 0;
563 /* Current state of PR implications */
564 static struct qp_imply {
567 unsigned p2_branched:1;
569 } *qp_implies = NULL;
570 static int qp_implieslen = 0;
571 static int qp_impliestotlen = 0;
573 /* Keep track of static GR values so that indirect register usage can
574 sometimes be tracked. */
579 } gr_values[128] = {{ 1, 0 }};
581 /* These are the routines required to output the various types of
584 /* A slot_number is a frag address plus the slot index (0-2). We use the
585 frag address here so that if there is a section switch in the middle of
586 a function, then instructions emitted to a different section are not
587 counted. Since there may be more than one frag for a function, this
588 means we also need to keep track of which frag this address belongs to
589 so we can compute inter-frag distances. This also nicely solves the
590 problem with nops emitted for align directives, which can't easily be
591 counted, but can easily be derived from frag sizes. */
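/* A sketch of the encoding described above: for an instruction in slot 1
   of a bundle located 32 bytes into the literal area of a hypothetical
   frag FR, the slot number would be

	(unsigned long) (FR->fr_literal + 32) + 1

   i.e. the bundle's address plus the slot index, with the owning frag
   recorded separately so inter-frag distances can be computed later.  */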
593 typedef struct unw_rec_list {
595 unsigned long slot_number;
597 struct unw_rec_list *next;
600 #define SLOT_NUM_NOT_SET -1
604 unsigned long next_slot_number;
605 fragS *next_slot_frag;
607 /* Maintain a list of unwind entries for the current function. */
611 /* Any unwind entries that should be attached to the current slot
612 that an insn is being constructed for. */
613 unw_rec_list *current_entry;
615 /* These are used to create the unwind table entry for this function. */
618 symbolS *info; /* pointer to unwind info */
619 symbolS *personality_routine;
621 /* TRUE if processing unwind directives in a prologue region. */
626 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
628 /* Forward declarations: */
629 static int ar_is_in_integer_unit PARAMS ((int regnum));
630 static void set_section PARAMS ((char *name));
631 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
632 unsigned int, unsigned int));
633 static void dot_radix PARAMS ((int));
634 static void dot_special_section PARAMS ((int));
635 static void dot_proc PARAMS ((int));
636 static void dot_fframe PARAMS ((int));
637 static void dot_vframe PARAMS ((int));
638 static void dot_vframesp PARAMS ((int));
639 static void dot_vframepsp PARAMS ((int));
640 static void dot_save PARAMS ((int));
641 static void dot_restore PARAMS ((int));
642 static void dot_restorereg PARAMS ((int));
643 static void dot_restorereg_p PARAMS ((int));
644 static void dot_handlerdata PARAMS ((int));
645 static void dot_unwentry PARAMS ((int));
646 static void dot_altrp PARAMS ((int));
647 static void dot_savemem PARAMS ((int));
648 static void dot_saveg PARAMS ((int));
649 static void dot_savef PARAMS ((int));
650 static void dot_saveb PARAMS ((int));
651 static void dot_savegf PARAMS ((int));
652 static void dot_spill PARAMS ((int));
653 static void dot_spillreg PARAMS ((int));
654 static void dot_spillmem PARAMS ((int));
655 static void dot_spillreg_p PARAMS ((int));
656 static void dot_spillmem_p PARAMS ((int));
657 static void dot_label_state PARAMS ((int));
658 static void dot_copy_state PARAMS ((int));
659 static void dot_unwabi PARAMS ((int));
660 static void dot_personality PARAMS ((int));
661 static void dot_body PARAMS ((int));
662 static void dot_prologue PARAMS ((int));
663 static void dot_endp PARAMS ((int));
664 static void dot_template PARAMS ((int));
665 static void dot_regstk PARAMS ((int));
666 static void dot_rot PARAMS ((int));
667 static void dot_byteorder PARAMS ((int));
668 static void dot_psr PARAMS ((int));
669 static void dot_alias PARAMS ((int));
670 static void dot_ln PARAMS ((int));
671 static char *parse_section_name PARAMS ((void));
672 static void dot_xdata PARAMS ((int));
673 static void stmt_float_cons PARAMS ((int));
674 static void stmt_cons_ua PARAMS ((int));
675 static void dot_xfloat_cons PARAMS ((int));
676 static void dot_xstringer PARAMS ((int));
677 static void dot_xdata_ua PARAMS ((int));
678 static void dot_xfloat_cons_ua PARAMS ((int));
679 static void print_prmask PARAMS ((valueT mask));
680 static void dot_pred_rel PARAMS ((int));
681 static void dot_reg_val PARAMS ((int));
682 static void dot_dv_mode PARAMS ((int));
683 static void dot_entry PARAMS ((int));
684 static void dot_mem_offset PARAMS ((int));
685 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
686 static symbolS *declare_register PARAMS ((const char *name, int regnum));
687 static void declare_register_set PARAMS ((const char *, int, int));
688 static unsigned int operand_width PARAMS ((enum ia64_opnd));
689 static int operand_match PARAMS ((const struct ia64_opcode *idesc,
690 int index, expressionS *e));
691 static int parse_operand PARAMS ((expressionS *e));
692 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
693 static void build_insn PARAMS ((struct slot *, bfd_vma *));
694 static void emit_one_bundle PARAMS ((void));
695 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
696 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
697 bfd_reloc_code_real_type r_type));
698 static void insn_group_break PARAMS ((int, int, int));
699 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
700 struct rsrc *, int depind, int path));
701 static void add_qp_mutex PARAMS((valueT mask));
702 static void add_qp_imply PARAMS((int p1, int p2));
703 static void clear_qp_branch_flag PARAMS((valueT mask));
704 static void clear_qp_mutex PARAMS((valueT mask));
705 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
706 static void clear_register_values PARAMS ((void));
707 static void print_dependency PARAMS ((const char *action, int depind));
708 static void instruction_serialization PARAMS ((void));
709 static void data_serialization PARAMS ((void));
710 static void remove_marked_resource PARAMS ((struct rsrc *));
711 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
712 static int is_taken_branch PARAMS ((struct ia64_opcode *));
713 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
714 static int depends_on PARAMS ((int, struct ia64_opcode *));
715 static int specify_resource PARAMS ((const struct ia64_dependency *,
716 struct ia64_opcode *, int, struct rsrc [], int, int));
717 static int check_dv PARAMS((struct ia64_opcode *idesc));
718 static void check_dependencies PARAMS((struct ia64_opcode *));
719 static void mark_resources PARAMS((struct ia64_opcode *));
720 static void update_dependencies PARAMS((struct ia64_opcode *));
721 static void note_register_values PARAMS((struct ia64_opcode *));
722 static int qp_mutex PARAMS ((int, int, int));
723 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
724 static void output_vbyte_mem PARAMS ((int, char *, char *));
725 static void count_output PARAMS ((int, char *, char *));
726 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
727 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
728 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
729 static void output_P1_format PARAMS ((vbyte_func, int));
730 static void output_P2_format PARAMS ((vbyte_func, int, int));
731 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
732 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
733 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
734 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
735 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
736 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
737 static void output_P9_format PARAMS ((vbyte_func, int, int));
738 static void output_P10_format PARAMS ((vbyte_func, int, int));
739 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
740 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
741 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
742 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
743 static char format_ab_reg PARAMS ((int, int));
744 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
746 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
747 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
749 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
750 static void free_list_records PARAMS ((unw_rec_list *));
751 static unw_rec_list *output_prologue PARAMS ((void));
752 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
753 static unw_rec_list *output_body PARAMS ((void));
754 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
755 static unw_rec_list *output_mem_stack_v PARAMS ((void));
756 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
757 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
758 static unw_rec_list *output_rp_when PARAMS ((void));
759 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
760 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
761 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
762 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
763 static unw_rec_list *output_pfs_when PARAMS ((void));
764 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
765 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
766 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
767 static unw_rec_list *output_preds_when PARAMS ((void));
768 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
769 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
770 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
771 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
772 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
773 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
774 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
775 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
776 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
777 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
778 static unw_rec_list *output_unat_when PARAMS ((void));
779 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
780 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
781 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
782 static unw_rec_list *output_lc_when PARAMS ((void));
783 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
784 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
785 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
786 static unw_rec_list *output_fpsr_when PARAMS ((void));
787 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
788 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
789 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
790 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
791 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
792 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
793 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
794 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
795 static unw_rec_list *output_bsp_when PARAMS ((void));
796 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
797 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
798 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
799 static unw_rec_list *output_bspstore_when PARAMS ((void));
800 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
801 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
802 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
803 static unw_rec_list *output_rnat_when PARAMS ((void));
804 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
805 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
806 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
807 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
808 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
809 static unw_rec_list *output_label_state PARAMS ((unsigned long));
810 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
811 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
812 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
813 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
815 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
817 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
819 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
820 unsigned int, unsigned int));
821 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
822 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
823 static int calc_record_size PARAMS ((unw_rec_list *));
824 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
825 static int count_bits PARAMS ((unsigned long));
826 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
827 unsigned long, fragS *));
828 static void fixup_unw_records PARAMS ((unw_rec_list *));
829 static int output_unw_records PARAMS ((unw_rec_list *, void **));
830 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
831 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
832 static int generate_unwind_image PARAMS ((void));
834 /* Determine if application register REGNUM resides in the integer
835 unit (as opposed to the memory unit). */
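/* For example (illustrative note, not in the original comment): ar.pfs
   (AR 64) and ar.lc (AR 65) reside in the integer unit, while ar.bsp
   (AR 17) and ar.unat (AR 36) reside in the memory unit; compare the
   application-register table above.  */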
837 ar_is_in_integer_unit (reg)
842 return (reg == 64 /* pfs */
843 || reg == 65 /* lc */
844 || reg == 66 /* ec */
845 /* ??? ias accepts and puts these in the integer unit. */
846 || (reg >= 112 && reg <= 127));
849 /* Switch to section NAME and create section if necessary. It's
850 rather ugly that we have to manipulate input_line_pointer but I
851 don't see any other way to accomplish the same thing without
852 changing obj-elf.c (which may be the Right Thing, in the end). */
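/* E.g. dot_special_section () below reaches this with names such as
   ".sbss" or ".IA_64.unwind", so the net effect is that of writing
   ".section .IA_64.unwind" in the source (a sketch of the intent).  */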
857 char *saved_input_line_pointer;
859 saved_input_line_pointer = input_line_pointer;
860 input_line_pointer = name;
862 input_line_pointer = saved_input_line_pointer;
865 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
868 ia64_elf_section_flags (flags, attr, type)
872 if (attr & SHF_IA_64_SHORT)
873 flags |= SEC_SMALL_DATA;
878 set_regstack (ins, locs, outs, rots)
879 unsigned int ins, locs, outs, rots;
884 sof = ins + locs + outs;
887 as_bad ("Size of frame exceeds maximum of 96 registers");
892 as_warn ("Size of rotating registers exceeds frame size");
895 md.in.base = REG_GR + 32;
896 md.loc.base = md.in.base + ins;
897 md.out.base = md.loc.base + locs;
899 md.in.num_regs = ins;
900 md.loc.num_regs = locs;
901 md.out.num_regs = outs;
902 md.rot.num_regs = rots;
909 struct label_fix *lfix;
911 subsegT saved_subseg;
913 if (!md.last_text_seg)
917 saved_subseg = now_subseg;
919 subseg_set (md.last_text_seg, 0);
921 while (md.num_slots_in_use > 0)
922 emit_one_bundle (); /* force out queued instructions */
924 /* In case there are labels following the last instruction, resolve them now. */
926 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
928 S_SET_VALUE (lfix->sym, frag_now_fix ());
929 symbol_set_frag (lfix->sym, frag_now);
931 CURR_SLOT.label_fixups = 0;
932 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
934 S_SET_VALUE (lfix->sym, frag_now_fix ());
935 symbol_set_frag (lfix->sym, frag_now);
937 CURR_SLOT.tag_fixups = 0;
939 subseg_set (saved_seg, saved_subseg);
941 if (md.qp.X_op == O_register)
942 as_bad ("qualifying predicate not followed by instruction");
946 ia64_do_align (nbytes)
949 char *saved_input_line_pointer = input_line_pointer;
951 input_line_pointer = "";
952 s_align_bytes (nbytes);
953 input_line_pointer = saved_input_line_pointer;
957 ia64_cons_align (nbytes)
962 char *saved_input_line_pointer = input_line_pointer;
963 input_line_pointer = "";
964 s_align_bytes (nbytes);
965 input_line_pointer = saved_input_line_pointer;
969 /* Output COUNT bytes to a memory location. */
970 static unsigned char *vbyte_mem_ptr = NULL;
973 output_vbyte_mem (count, ptr, comment)
979 if (vbyte_mem_ptr == NULL)
984 for (x = 0; x < count; x++)
985 *(vbyte_mem_ptr++) = ptr[x];
988 /* Count the number of bytes required for records. */
989 static int vbyte_count = 0;
991 count_output (count, ptr, comment)
996 vbyte_count += count;
1000 output_R1_format (f, rtype, rlen)
1002 unw_record_type rtype;
1009 output_R3_format (f, rtype, rlen);
1015 else if (rtype != prologue)
1016 as_bad ("record type is not valid");
1018 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1019 (*f) (1, &byte, NULL);
1023 output_R2_format (f, mask, grsave, rlen)
1030 mask = (mask & 0x0f);
1031 grsave = (grsave & 0x7f);
1033 bytes[0] = (UNW_R2 | (mask >> 1));
1034 bytes[1] = (((mask & 0x01) << 7) | grsave);
1035 count += output_leb128 (bytes + 2, rlen, 0);
1036 (*f) (count, bytes, NULL);
1040 output_R3_format (f, rtype, rlen)
1042 unw_record_type rtype;
1049 output_R1_format (f, rtype, rlen);
1055 else if (rtype != prologue)
1056 as_bad ("record type is not valid");
1057 bytes[0] = (UNW_R3 | r);
1058 count = output_leb128 (bytes + 1, rlen, 0);
1059 (*f) (count + 1, bytes, NULL);
1063 output_P1_format (f, brmask)
1068 byte = UNW_P1 | (brmask & 0x1f);
1069 (*f) (1, &byte, NULL);
1073 output_P2_format (f, brmask, gr)
1079 brmask = (brmask & 0x1f);
1080 bytes[0] = UNW_P2 | (brmask >> 1);
1081 bytes[1] = (((brmask & 1) << 7) | gr);
1082 (*f) (2, bytes, NULL);
1086 output_P3_format (f, rtype, reg)
1088 unw_record_type rtype;
1133 as_bad ("Invalid record type for P3 format.");
1135 bytes[0] = (UNW_P3 | (r >> 1));
1136 bytes[1] = (((r & 1) << 7) | reg);
1137 (*f) (2, bytes, NULL);
1141 output_P4_format (f, imask, imask_size)
1143 unsigned char *imask;
1144 unsigned long imask_size;
1147 (*f) (imask_size, imask, NULL);
1151 output_P5_format (f, grmask, frmask)
1154 unsigned long frmask;
1157 grmask = (grmask & 0x0f);
1160 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1161 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1162 bytes[3] = (frmask & 0x000000ff);
1163 (*f) (4, bytes, NULL);
1167 output_P6_format (f, rtype, rmask)
1169 unw_record_type rtype;
1175 if (rtype == gr_mem)
1177 else if (rtype != fr_mem)
1178 as_bad ("Invalid record type for format P6");
1179 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1180 (*f) (1, &byte, NULL);
1184 output_P7_format (f, rtype, w1, w2)
1186 unw_record_type rtype;
1193 count += output_leb128 (bytes + 1, w1, 0);
1198 count += output_leb128 (bytes + count, w2 >> 4, 0);
1248 bytes[0] = (UNW_P7 | r);
1249 (*f) (count, bytes, NULL);
1253 output_P8_format (f, rtype, t)
1255 unw_record_type rtype;
1294 case bspstore_psprel:
1297 case bspstore_sprel:
1309 case priunat_when_gr:
1312 case priunat_psprel:
1318 case priunat_when_mem:
1325 count += output_leb128 (bytes + 2, t, 0);
1326 (*f) (count, bytes, NULL);
1330 output_P9_format (f, grmask, gr)
1337 bytes[1] = (grmask & 0x0f);
1338 bytes[2] = (gr & 0x7f);
1339 (*f) (3, bytes, NULL);
1343 output_P10_format (f, abi, context)
1350 bytes[1] = (abi & 0xff);
1351 bytes[2] = (context & 0xff);
1352 (*f) (3, bytes, NULL);
1356 output_B1_format (f, rtype, label)
1358 unw_record_type rtype;
1359 unsigned long label;
1365 output_B4_format (f, rtype, label);
1368 if (rtype == copy_state)
1370 else if (rtype != label_state)
1371 as_bad ("Invalid record type for format B1");
1373 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1374 (*f) (1, &byte, NULL);
1378 output_B2_format (f, ecount, t)
1380 unsigned long ecount;
1387 output_B3_format (f, ecount, t);
1390 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1391 count += output_leb128 (bytes + 1, t, 0);
1392 (*f) (count, bytes, NULL);
1396 output_B3_format (f, ecount, t)
1398 unsigned long ecount;
1405 output_B2_format (f, ecount, t);
1409 count += output_leb128 (bytes + 1, t, 0);
1410 count += output_leb128 (bytes + count, ecount, 0);
1411 (*f) (count, bytes, NULL);
1415 output_B4_format (f, rtype, label)
1417 unw_record_type rtype;
1418 unsigned long label;
1425 output_B1_format (f, rtype, label);
1429 if (rtype == copy_state)
1431 else if (rtype != label_state)
1432 as_bad ("Invalid record type for format B4");
1434 bytes[0] = (UNW_B4 | (r << 3));
1435 count += output_leb128 (bytes + 1, label, 0);
1436 (*f) (count, bytes, NULL);
1440 format_ab_reg (ab, reg)
1447 ret = (ab << 5) | reg;
1452 output_X1_format (f, rtype, ab, reg, t, w1)
1454 unw_record_type rtype;
1464 if (rtype == spill_sprel)
1466 else if (rtype != spill_psprel)
1467 as_bad ("Invalid record type for format X1");
1468 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1469 count += output_leb128 (bytes + 2, t, 0);
1470 count += output_leb128 (bytes + count, w1, 0);
1471 (*f) (count, bytes, NULL);
1475 output_X2_format (f, ab, reg, x, y, treg, t)
1484 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1485 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1486 count += output_leb128 (bytes + 3, t, 0);
1487 (*f) (count, bytes, NULL);
1491 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1493 unw_record_type rtype;
1504 if (rtype == spill_sprel_p)
1506 else if (rtype != spill_psprel_p)
1507 as_bad ("Invalid record type for format X3");
1508 bytes[1] = ((r << 7) | (qp & 0x3f));
1509 bytes[2] = format_ab_reg (ab, reg);
1510 count += output_leb128 (bytes + 3, t, 0);
1511 count += output_leb128 (bytes + count, w1, 0);
1512 (*f) (count, bytes, NULL);
1516 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1526 bytes[1] = (qp & 0x3f);
1527 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1528 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1529 count += output_leb128 (bytes + 4, t, 0);
1530 (*f) (count, bytes, NULL);
1533 /* This function allocates a record list structure, and initializes fields. */
1535 static unw_rec_list *
1536 alloc_record (unw_record_type t)
1539 ptr = xmalloc (sizeof (*ptr));
1541 ptr->slot_number = SLOT_NUM_NOT_SET;
1546 /* This function frees an entire list of record structures. */
1549 free_list_records (unw_rec_list *first)
1552 for (ptr = first; ptr != NULL;)
1554 unw_rec_list *tmp = ptr;
1556 if ((tmp->r.type == prologue || tmp->r.type == prologue_gr)
1557 && tmp->r.record.r.mask.i)
1558 free (tmp->r.record.r.mask.i);
1565 static unw_rec_list *
1568 unw_rec_list *ptr = alloc_record (prologue);
1569 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1573 static unw_rec_list *
1574 output_prologue_gr (saved_mask, reg)
1575 unsigned int saved_mask;
1578 unw_rec_list *ptr = alloc_record (prologue_gr);
1579 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1580 ptr->r.record.r.grmask = saved_mask;
1581 ptr->r.record.r.grsave = reg;
1585 static unw_rec_list *
1588 unw_rec_list *ptr = alloc_record (body);
1592 static unw_rec_list *
1593 output_mem_stack_f (size)
1596 unw_rec_list *ptr = alloc_record (mem_stack_f);
1597 ptr->r.record.p.size = size;
1601 static unw_rec_list *
1602 output_mem_stack_v ()
1604 unw_rec_list *ptr = alloc_record (mem_stack_v);
1608 static unw_rec_list *
1612 unw_rec_list *ptr = alloc_record (psp_gr);
1613 ptr->r.record.p.gr = gr;
1617 static unw_rec_list *
1618 output_psp_sprel (offset)
1619 unsigned int offset;
1621 unw_rec_list *ptr = alloc_record (psp_sprel);
1622 ptr->r.record.p.spoff = offset / 4;
1626 static unw_rec_list *
1629 unw_rec_list *ptr = alloc_record (rp_when);
1633 static unw_rec_list *
1637 unw_rec_list *ptr = alloc_record (rp_gr);
1638 ptr->r.record.p.gr = gr;
1642 static unw_rec_list *
1646 unw_rec_list *ptr = alloc_record (rp_br);
1647 ptr->r.record.p.br = br;
1651 static unw_rec_list *
1652 output_rp_psprel (offset)
1653 unsigned int offset;
1655 unw_rec_list *ptr = alloc_record (rp_psprel);
1656 ptr->r.record.p.pspoff = offset / 4;
1660 static unw_rec_list *
1661 output_rp_sprel (offset)
1662 unsigned int offset;
1664 unw_rec_list *ptr = alloc_record (rp_sprel);
1665 ptr->r.record.p.spoff = offset / 4;
1669 static unw_rec_list *
1672 unw_rec_list *ptr = alloc_record (pfs_when);
1676 static unw_rec_list *
1680 unw_rec_list *ptr = alloc_record (pfs_gr);
1681 ptr->r.record.p.gr = gr;
1685 static unw_rec_list *
1686 output_pfs_psprel (offset)
1687 unsigned int offset;
1689 unw_rec_list *ptr = alloc_record (pfs_psprel);
1690 ptr->r.record.p.pspoff = offset / 4;
1694 static unw_rec_list *
1695 output_pfs_sprel (offset)
1696 unsigned int offset;
1698 unw_rec_list *ptr = alloc_record (pfs_sprel);
1699 ptr->r.record.p.spoff = offset / 4;
1703 static unw_rec_list *
1704 output_preds_when ()
1706 unw_rec_list *ptr = alloc_record (preds_when);
1710 static unw_rec_list *
1711 output_preds_gr (gr)
1714 unw_rec_list *ptr = alloc_record (preds_gr);
1715 ptr->r.record.p.gr = gr;
1719 static unw_rec_list *
1720 output_preds_psprel (offset)
1721 unsigned int offset;
1723 unw_rec_list *ptr = alloc_record (preds_psprel);
1724 ptr->r.record.p.pspoff = offset / 4;
1728 static unw_rec_list *
1729 output_preds_sprel (offset)
1730 unsigned int offset;
1732 unw_rec_list *ptr = alloc_record (preds_sprel);
1733 ptr->r.record.p.spoff = offset / 4;
1737 static unw_rec_list *
1738 output_fr_mem (mask)
1741 unw_rec_list *ptr = alloc_record (fr_mem);
1742 ptr->r.record.p.rmask = mask;
1746 static unw_rec_list *
1747 output_frgr_mem (gr_mask, fr_mask)
1748 unsigned int gr_mask;
1749 unsigned int fr_mask;
1751 unw_rec_list *ptr = alloc_record (frgr_mem);
1752 ptr->r.record.p.grmask = gr_mask;
1753 ptr->r.record.p.frmask = fr_mask;
1757 static unw_rec_list *
1758 output_gr_gr (mask, reg)
1762 unw_rec_list *ptr = alloc_record (gr_gr);
1763 ptr->r.record.p.grmask = mask;
1764 ptr->r.record.p.gr = reg;
1768 static unw_rec_list *
1769 output_gr_mem (mask)
1772 unw_rec_list *ptr = alloc_record (gr_mem);
1773 ptr->r.record.p.rmask = mask;
1777 static unw_rec_list *
1778 output_br_mem (unsigned int mask)
1780 unw_rec_list *ptr = alloc_record (br_mem);
1781 ptr->r.record.p.brmask = mask;
1785 static unw_rec_list *
1786 output_br_gr (save_mask, reg)
1787 unsigned int save_mask;
1790 unw_rec_list *ptr = alloc_record (br_gr);
1791 ptr->r.record.p.brmask = save_mask;
1792 ptr->r.record.p.gr = reg;
1796 static unw_rec_list *
1797 output_spill_base (offset)
1798 unsigned int offset;
1800 unw_rec_list *ptr = alloc_record (spill_base);
1801 ptr->r.record.p.pspoff = offset / 4;
1805 static unw_rec_list *
1808 unw_rec_list *ptr = alloc_record (unat_when);
1812 static unw_rec_list *
1816 unw_rec_list *ptr = alloc_record (unat_gr);
1817 ptr->r.record.p.gr = gr;
1821 static unw_rec_list *
1822 output_unat_psprel (offset)
1823 unsigned int offset;
1825 unw_rec_list *ptr = alloc_record (unat_psprel);
1826 ptr->r.record.p.pspoff = offset / 4;
1830 static unw_rec_list *
1831 output_unat_sprel (offset)
1832 unsigned int offset;
1834 unw_rec_list *ptr = alloc_record (unat_sprel);
1835 ptr->r.record.p.spoff = offset / 4;
1839 static unw_rec_list *
1842 unw_rec_list *ptr = alloc_record (lc_when);
1846 static unw_rec_list *
1850 unw_rec_list *ptr = alloc_record (lc_gr);
1851 ptr->r.record.p.gr = gr;
1855 static unw_rec_list *
1856 output_lc_psprel (offset)
1857 unsigned int offset;
1859 unw_rec_list *ptr = alloc_record (lc_psprel);
1860 ptr->r.record.p.pspoff = offset / 4;
1864 static unw_rec_list *
1865 output_lc_sprel (offset)
1866 unsigned int offset;
1868 unw_rec_list *ptr = alloc_record (lc_sprel);
1869 ptr->r.record.p.spoff = offset / 4;
1873 static unw_rec_list *
1876 unw_rec_list *ptr = alloc_record (fpsr_when);
1880 static unw_rec_list *
1884 unw_rec_list *ptr = alloc_record (fpsr_gr);
1885 ptr->r.record.p.gr = gr;
1889 static unw_rec_list *
1890 output_fpsr_psprel (offset)
1891 unsigned int offset;
1893 unw_rec_list *ptr = alloc_record (fpsr_psprel);
1894 ptr->r.record.p.pspoff = offset / 4;
1898 static unw_rec_list *
1899 output_fpsr_sprel (offset)
1900 unsigned int offset;
1902 unw_rec_list *ptr = alloc_record (fpsr_sprel);
1903 ptr->r.record.p.spoff = offset / 4;
1907 static unw_rec_list *
1908 output_priunat_when_gr ()
1910 unw_rec_list *ptr = alloc_record (priunat_when_gr);
1914 static unw_rec_list *
1915 output_priunat_when_mem ()
1917 unw_rec_list *ptr = alloc_record (priunat_when_mem);
1921 static unw_rec_list *
1922 output_priunat_gr (gr)
1925 unw_rec_list *ptr = alloc_record (priunat_gr);
1926 ptr->r.record.p.gr = gr;
1930 static unw_rec_list *
1931 output_priunat_psprel (offset)
1932 unsigned int offset;
1934 unw_rec_list *ptr = alloc_record (priunat_psprel);
1935 ptr->r.record.p.pspoff = offset / 4;
1939 static unw_rec_list *
1940 output_priunat_sprel (offset)
1941 unsigned int offset;
1943 unw_rec_list *ptr = alloc_record (priunat_sprel);
1944 ptr->r.record.p.spoff = offset / 4;
1948 static unw_rec_list *
1951 unw_rec_list *ptr = alloc_record (bsp_when);
1955 static unw_rec_list *
1959 unw_rec_list *ptr = alloc_record (bsp_gr);
1960 ptr->r.record.p.gr = gr;
1964 static unw_rec_list *
1965 output_bsp_psprel (offset)
1966 unsigned int offset;
1968 unw_rec_list *ptr = alloc_record (bsp_psprel);
1969 ptr->r.record.p.pspoff = offset / 4;
1973 static unw_rec_list *
1974 output_bsp_sprel (offset)
1975 unsigned int offset;
1977 unw_rec_list *ptr = alloc_record (bsp_sprel);
1978 ptr->r.record.p.spoff = offset / 4;
1982 static unw_rec_list *
1983 output_bspstore_when ()
1985 unw_rec_list *ptr = alloc_record (bspstore_when);
1989 static unw_rec_list *
1990 output_bspstore_gr (gr)
1993 unw_rec_list *ptr = alloc_record (bspstore_gr);
1994 ptr->r.record.p.gr = gr;
1998 static unw_rec_list *
1999 output_bspstore_psprel (offset)
2000 unsigned int offset;
2002 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2003 ptr->r.record.p.pspoff = offset / 4;
2007 static unw_rec_list *
2008 output_bspstore_sprel (offset)
2009 unsigned int offset;
2011 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2012 ptr->r.record.p.spoff = offset / 4;
2016 static unw_rec_list *
2019 unw_rec_list *ptr = alloc_record (rnat_when);
2023 static unw_rec_list *
2027 unw_rec_list *ptr = alloc_record (rnat_gr);
2028 ptr->r.record.p.gr = gr;
2032 static unw_rec_list *
2033 output_rnat_psprel (offset)
2034 unsigned int offset;
2036 unw_rec_list *ptr = alloc_record (rnat_psprel);
2037 ptr->r.record.p.pspoff = offset / 4;
2041 static unw_rec_list *
2042 output_rnat_sprel (offset)
2043 unsigned int offset;
2045 unw_rec_list *ptr = alloc_record (rnat_sprel);
2046 ptr->r.record.p.spoff = offset / 4;
2050 static unw_rec_list *
2051 output_unwabi (abi, context)
2053 unsigned long context;
2055 unw_rec_list *ptr = alloc_record (unwabi);
2056 ptr->r.record.p.abi = abi;
2057 ptr->r.record.p.context = context;
2061 static unw_rec_list *
2062 output_epilogue (unsigned long ecount)
2064 unw_rec_list *ptr = alloc_record (epilogue);
2065 ptr->r.record.b.ecount = ecount;
2069 static unw_rec_list *
2070 output_label_state (unsigned long label)
2072 unw_rec_list *ptr = alloc_record (label_state);
2073 ptr->r.record.b.label = label;
2077 static unw_rec_list *
2078 output_copy_state (unsigned long label)
2080 unw_rec_list *ptr = alloc_record (copy_state);
2081 ptr->r.record.b.label = label;
2085 static unw_rec_list *
2086 output_spill_psprel (ab, reg, offset)
2089 unsigned int offset;
2091 unw_rec_list *ptr = alloc_record (spill_psprel);
2092 ptr->r.record.x.ab = ab;
2093 ptr->r.record.x.reg = reg;
2094 ptr->r.record.x.pspoff = offset / 4;
2098 static unw_rec_list *
2099 output_spill_sprel (ab, reg, offset)
2102 unsigned int offset;
2104 unw_rec_list *ptr = alloc_record (spill_sprel);
2105 ptr->r.record.x.ab = ab;
2106 ptr->r.record.x.reg = reg;
2107 ptr->r.record.x.spoff = offset / 4;
2111 static unw_rec_list *
2112 output_spill_psprel_p (ab, reg, offset, predicate)
2115 unsigned int offset;
2116 unsigned int predicate;
2118 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2119 ptr->r.record.x.ab = ab;
2120 ptr->r.record.x.reg = reg;
2121 ptr->r.record.x.pspoff = offset / 4;
2122 ptr->r.record.x.qp = predicate;
2126 static unw_rec_list *
2127 output_spill_sprel_p (ab, reg, offset, predicate)
2130 unsigned int offset;
2131 unsigned int predicate;
2133 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2134 ptr->r.record.x.ab = ab;
2135 ptr->r.record.x.reg = reg;
2136 ptr->r.record.x.spoff = offset / 4;
2137 ptr->r.record.x.qp = predicate;
2141 static unw_rec_list *
2142 output_spill_reg (ab, reg, targ_reg, xy)
2145 unsigned int targ_reg;
2148 unw_rec_list *ptr = alloc_record (spill_reg);
2149 ptr->r.record.x.ab = ab;
2150 ptr->r.record.x.reg = reg;
2151 ptr->r.record.x.treg = targ_reg;
2152 ptr->r.record.x.xy = xy;
2156 static unw_rec_list *
2157 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2160 unsigned int targ_reg;
2162 unsigned int predicate;
2164 unw_rec_list *ptr = alloc_record (spill_reg_p);
2165 ptr->r.record.x.ab = ab;
2166 ptr->r.record.x.reg = reg;
2167 ptr->r.record.x.treg = targ_reg;
2168 ptr->r.record.x.xy = xy;
2169 ptr->r.record.x.qp = predicate;
2173 /* Given a unw_rec_list process the correct format with the
2174 specified function. */
2177 process_one_record (ptr, f)
2181 unsigned long fr_mask, gr_mask;
2183 switch (ptr->r.type)
2189 /* These are taken care of by prologue/prologue_gr. */
2194 if (ptr->r.type == prologue_gr)
2195 output_R2_format (f, ptr->r.record.r.grmask,
2196 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2198 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2200 /* Output descriptor(s) for union of register spills (if any). */
2201 gr_mask = ptr->r.record.r.mask.gr_mem;
2202 fr_mask = ptr->r.record.r.mask.fr_mem;
2205 if ((fr_mask & ~0xfUL) == 0)
2206 output_P6_format (f, fr_mem, fr_mask);
2209 output_P5_format (f, gr_mask, fr_mask);
2214 output_P6_format (f, gr_mem, gr_mask);
2215 if (ptr->r.record.r.mask.br_mem)
2216 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2218 /* output imask descriptor if necessary: */
2219 if (ptr->r.record.r.mask.i)
2220 output_P4_format (f, ptr->r.record.r.mask.i,
2221 ptr->r.record.r.imask_size);
2225 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2229 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2230 ptr->r.record.p.size);
2243 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2246 output_P3_format (f, rp_br, ptr->r.record.p.br);
2249 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2257 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2266 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2276 case bspstore_sprel:
2278 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2281 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2284 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2287 as_bad ("spill_mask record unimplemented.");
2289 case priunat_when_gr:
2290 case priunat_when_mem:
2294 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2296 case priunat_psprel:
2298 case bspstore_psprel:
2300 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2303 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2306 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2310 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2313 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2314 ptr->r.record.x.reg, ptr->r.record.x.t,
2315 ptr->r.record.x.pspoff);
2318 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2319 ptr->r.record.x.reg, ptr->r.record.x.t,
2320 ptr->r.record.x.spoff);
2323 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2324 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2325 ptr->r.record.x.treg, ptr->r.record.x.t);
2327 case spill_psprel_p:
2328 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2329 ptr->r.record.x.ab, ptr->r.record.x.reg,
2330 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2333 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2334 ptr->r.record.x.ab, ptr->r.record.x.reg,
2335 ptr->r.record.x.t, ptr->r.record.x.spoff);
2338 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2339 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2340 ptr->r.record.x.xy, ptr->r.record.x.treg,
2344 as_bad ("record_type_not_valid");
2349 /* Given a unw_rec_list list, process all the records with
2350 the specified function. */
2352 process_unw_records (list, f)
2357 for (ptr = list; ptr; ptr = ptr->next)
2358 process_one_record (ptr, f);
2361 /* Determine the size of a record list in bytes. */
2363 calc_record_size (list)
2367 process_unw_records (list, count_output);
2371 /* Update IMASK bitmask to reflect the fact that one or more registers
2372 of type TYPE are saved starting at instruction with index T. If N
2373 bits are set in REGMASK, it is assumed that instructions T through
2374 T+N-1 save these registers.
2378 1: instruction saves next fp reg
2379 2: instruction saves next general reg
2380 3: instruction saves next branch reg */
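/* Worked example (a sketch, not part of the original comment): each
   instruction slot is described by a 2-bit code, packed four slots per
   byte with the earliest slot in the most significant pair.  For a save
   in slot t = 5, pos = 2 * (3 - 5 % 4) = 4, so (type & 0x3) << 4 is
   OR-ed into the byte covering that slot; a register saved by the next
   instruction (t = 6) lands two bits lower (pos = 2).  */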
2382 set_imask (region, regmask, t, type)
2383 unw_rec_list *region;
2384 unsigned long regmask;
2388 unsigned char *imask;
2389 unsigned long imask_size;
2393 imask = region->r.record.r.mask.i;
2394 imask_size = region->r.record.r.imask_size;
2397 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2398 imask = xmalloc (imask_size);
2399 memset (imask, 0, imask_size);
2401 region->r.record.r.imask_size = imask_size;
2402 region->r.record.r.mask.i = imask;
2406 pos = 2 * (3 - t % 4);
2409 if (i >= imask_size)
2411 as_bad ("Ignoring attempt to spill beyond end of region");
2415 imask[i] |= (type & 0x3) << pos;
2417 regmask &= (regmask - 1);
2428 count_bits (unsigned long mask)
2440 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2441 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2442 containing FIRST_ADDR. */
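/* Arithmetic sketch (derived from the code below): a slot address packs
   the 16-byte bundle address in its upper bits and the slot (0-2) in the
   low bits, so within one frag the distance in slots is

	3 * ((slot_addr >> 4) - (first_addr >> 4))
	  + ((slot_addr & 0x3) - (first_addr & 0x3))

   e.g. from slot 1 of one bundle to slot 0 of the second following bundle
   is 3 * 2 + (0 - 1) = 5 slots.  */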
2445 slot_index (slot_addr, slot_frag, first_addr, first_frag)
2446 unsigned long slot_addr;
2448 unsigned long first_addr;
2451 unsigned long index = 0;
2453 /* First time we are called, the initial address and frag are invalid. */
2454 if (first_addr == 0)
2457 /* If the two addresses are in different frags, then we need to add in
2458 the remaining size of this frag, and then the entire size of intermediate frags. */
2460 while (slot_frag != first_frag)
2462 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2464 /* Add in the full size of the frag converted to instruction slots. */
2465 index += 3 * (first_frag->fr_fix >> 4);
2466 /* Subtract away the initial part before first_addr. */
2467 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2468 + ((first_addr & 0x3) - (start_addr & 0x3)));
2470 /* Move to the beginning of the next frag. */
2471 first_frag = first_frag->fr_next;
2472 first_addr = (unsigned long) &first_frag->fr_literal;
2475 /* Add in the used part of the last frag. */
2476 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2477 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2481 /* Given a complete record list, process any records which have
2482 unresolved fields (i.e., length counts for a prologue). After
2483 this has been run, all necessary information should be available
2484 within each record to generate an image. */
2487 fixup_unw_records (list)
2490 unw_rec_list *ptr, *region = 0;
2491 unsigned long first_addr = 0, rlen = 0, t;
2492 fragS *first_frag = 0;
2494 for (ptr = list; ptr; ptr = ptr->next)
2496 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2497 as_bad ("Insn slot not set in unwind record.");
2498 t = slot_index (ptr->slot_number, ptr->slot_frag,
2499 first_addr, first_frag);
2500 switch (ptr->r.type)
2507 int size, dir_len = 0;
2508 unsigned long last_addr;
2511 first_addr = ptr->slot_number;
2512 first_frag = ptr->slot_frag;
2513 ptr->slot_number = 0;
2514 /* Find either the next body/prologue start, or the end of
2515 the list, and determine the size of the region. */
2516 last_addr = unwind.next_slot_number;
2517 last_frag = unwind.next_slot_frag;
2518 for (last = ptr->next; last != NULL; last = last->next)
2519 if (last->r.type == prologue || last->r.type == prologue_gr
2520 || last->r.type == body)
2522 last_addr = last->slot_number;
2523 last_frag = last->slot_frag;
2526 else if (!last->next)
2528 /* In the absence of an explicit .body directive,
2529 the prologue ends after the last instruction
2530 covered by an unwind directive. */
2531 if (ptr->r.type != body)
2533 last_addr = last->slot_number;
2534 last_frag = last->slot_frag;
2535 switch (last->r.type)
2538 dir_len = (count_bits (last->r.record.p.frmask)
2539 + count_bits (last->r.record.p.grmask));
2543 dir_len += count_bits (last->r.record.p.rmask);
2547 dir_len += count_bits (last->r.record.p.brmask);
2550 dir_len += count_bits (last->r.record.p.grmask);
2559 size = (slot_index (last_addr, last_frag, first_addr, first_frag)
2561 rlen = ptr->r.record.r.rlen = size;
2566 ptr->r.record.b.t = rlen - 1 - t;
2577 case priunat_when_gr:
2578 case priunat_when_mem:
2582 ptr->r.record.p.t = t;
2590 case spill_psprel_p:
2591 ptr->r.record.x.t = t;
2597 as_bad ("frgr_mem record before region record!\n");
2600 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2601 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2602 set_imask (region, ptr->r.record.p.frmask, t, 1);
2603 set_imask (region, ptr->r.record.p.grmask, t, 2);
2608 as_bad ("fr_mem record before region record!\n");
2611 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2612 set_imask (region, ptr->r.record.p.rmask, t, 1);
2617 as_bad ("gr_mem record before region record!\n");
2620 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2621 set_imask (region, ptr->r.record.p.rmask, t, 2);
2626 as_bad ("br_mem record before region record!\n");
2629 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2630 set_imask (region, ptr->r.record.p.brmask, t, 3);
2636 as_bad ("gr_gr record before region record!\n");
2639 set_imask (region, ptr->r.record.p.grmask, t, 2);
2644 as_bad ("br_gr record before region record!\n");
2647 set_imask (region, ptr->r.record.p.brmask, t, 3);
2656 /* Generate an unwind image from a record list. Returns the number of
2657 bytes in the resulting image. The memory image itself is returned
2658 in the 'ptr' parameter. */
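/* Rough layout produced below (a sketch of the code that follows, not a
   normative description of the unwind section format):

	8 bytes		header: version in bits 48+, handler flags,
			length of the records in 8-byte words
	size bytes	unwind records (vbyte_mem output)
	extra bytes	zero padding up to an 8-byte boundary
	8 bytes		personality routine offset (zeroed here)

   hence the returned value of size + extra + 16.  */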
2660 output_unw_records (list, ptr)
2664 int size, x, extra = 0;
2667 fixup_unw_records (list);
2668 size = calc_record_size (list);
2670 /* Pad to an 8-byte boundary. */
2674 /* Add 8 for the header + 8 more bytes for the personality offset. */
2675 mem = xmalloc (size + extra + 16);
2677 vbyte_mem_ptr = mem + 8;
2678 /* Clear the padding area and personality. */
2679 memset (mem + 8 + size, 0 , extra + 8);
2680 /* Initialize the header area. */
2681 md_number_to_chars (mem, (((bfd_vma) 1 << 48) /* version */
2682 | (unwind.personality_routine
2683 ? ((bfd_vma) 3 << 32) /* U & E handler flags */
2685 | ((size + extra) / 8)), /* length (dwords) */
2688 process_unw_records (list, output_vbyte_mem);
2691 return size + extra + 16;
2695 convert_expr_to_ab_reg (e, ab, regp)
2702 if (e->X_op != O_register)
2705 reg = e->X_add_number;
2706 if (reg >= REG_GR + 4 && reg <= REG_GR + 7)
2709 *regp = reg - REG_GR;
2711 else if ((reg >= REG_FR + 2 && reg <= REG_FR + 5)
2712 || (reg >= REG_FR + 16 && reg <= REG_FR + 31))
2715 *regp = reg - REG_FR;
2717 else if (reg >= REG_BR + 1 && reg <= REG_BR + 5)
2720 *regp = reg - REG_BR;
2727 case REG_PR: *regp = 0; break;
2728 case REG_PSP: *regp = 1; break;
2729 case REG_PRIUNAT: *regp = 2; break;
2730 case REG_BR + 0: *regp = 3; break;
2731 case REG_AR + AR_BSP: *regp = 4; break;
2732 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2733 case REG_AR + AR_RNAT: *regp = 6; break;
2734 case REG_AR + AR_UNAT: *regp = 7; break;
2735 case REG_AR + AR_FPSR: *regp = 8; break;
2736 case REG_AR + AR_PFS: *regp = 9; break;
2737 case REG_AR + AR_LC: *regp = 10; break;
2747 convert_expr_to_xy_reg (e, xy, regp)
2754 if (e->X_op != O_register)
2757 reg = e->X_add_number;
2759 if (reg >= REG_GR && reg <= REG_GR + 127)
2762 *regp = reg - REG_GR;
2764 else if (reg >= REG_FR && reg <= REG_FR + 127)
2767 *regp = reg - REG_FR;
2769 else if (reg >= REG_BR && reg <= REG_BR + 7)
2772 *regp = reg - REG_BR;
2786 radix = *input_line_pointer++;
2788 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
2790 as_bad ("Radix `%c' unsupported", radix);
2791 ignore_rest_of_line ();
2796 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
2798 dot_special_section (which)
2801 set_section ((char *) special_section_name[which]);
2805 add_unwind_entry (ptr)
2809 unwind.tail->next = ptr;
2814 /* The current entry can in fact be a chain of unwind entries. */
2815 if (unwind.current_entry == NULL)
2816 unwind.current_entry = ptr;
2827 if (e.X_op != O_constant)
2828 as_bad ("Operand to .fframe must be a constant");
2830 add_unwind_entry (output_mem_stack_f (e.X_add_number));
2841 reg = e.X_add_number - REG_GR;
2842 if (e.X_op == O_register && reg < 128)
2844 add_unwind_entry (output_mem_stack_v ());
2845 if (! (unwind.prologue_mask & 2))
2846 add_unwind_entry (output_psp_gr (reg));
2849 as_bad ("First operand to .vframe must be a general register");
2853 dot_vframesp (dummy)
2859 if (e.X_op == O_constant)
2861 add_unwind_entry (output_mem_stack_v ());
2862 add_unwind_entry (output_psp_sprel (e.X_add_number));
2865 as_bad ("First operand to .vframesp must be a constant");
2869 dot_vframepsp (dummy)
2875 if (e.X_op == O_constant)
2877 add_unwind_entry (output_mem_stack_v ());
2878 add_unwind_entry (output_psp_sprel (e.X_add_number));
2881 as_bad ("First operand to .vframepsp must be a constant");
2892 sep = parse_operand (&e1);
2894 as_bad ("No second operand to .save");
2895 sep = parse_operand (&e2);
2897 reg1 = e1.X_add_number;
2898 reg2 = e2.X_add_number - REG_GR;
2900 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
2901 if (e1.X_op == O_register)
2903 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
2907 case REG_AR + AR_BSP:
2908 add_unwind_entry (output_bsp_when ());
2909 add_unwind_entry (output_bsp_gr (reg2));
2911 case REG_AR + AR_BSPSTORE:
2912 add_unwind_entry (output_bspstore_when ());
2913 add_unwind_entry (output_bspstore_gr (reg2));
2915 case REG_AR + AR_RNAT:
2916 add_unwind_entry (output_rnat_when ());
2917 add_unwind_entry (output_rnat_gr (reg2));
2919 case REG_AR + AR_UNAT:
2920 add_unwind_entry (output_unat_when ());
2921 add_unwind_entry (output_unat_gr (reg2));
2923 case REG_AR + AR_FPSR:
2924 add_unwind_entry (output_fpsr_when ());
2925 add_unwind_entry (output_fpsr_gr (reg2));
2927 case REG_AR + AR_PFS:
2928 add_unwind_entry (output_pfs_when ());
2929 if (! (unwind.prologue_mask & 4))
2930 add_unwind_entry (output_pfs_gr (reg2));
2932 case REG_AR + AR_LC:
2933 add_unwind_entry (output_lc_when ());
2934 add_unwind_entry (output_lc_gr (reg2));
2937 add_unwind_entry (output_rp_when ());
2938 if (! (unwind.prologue_mask & 8))
2939 add_unwind_entry (output_rp_gr (reg2));
2942 add_unwind_entry (output_preds_when ());
2943 if (! (unwind.prologue_mask & 1))
2944 add_unwind_entry (output_preds_gr (reg2));
2947 add_unwind_entry (output_priunat_when_gr ());
2948 add_unwind_entry (output_priunat_gr (reg2));
2951 as_bad ("First operand not a valid register");
2955 as_bad ("Second operand not a valid register");
2958 as_bad ("First operand not a register");
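/* For illustration (hypothetical operands), a directive such as

	.save ar.pfs, r35

   records that ar.pfs was copied to r35 at this point of the prologue,
   which corresponds to the output_pfs_when/output_pfs_gr pair emitted
   above.  */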
2966 unsigned long ecount = 0;
2969 sep = parse_operand (&e1);
2970 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
2972 as_bad ("First operand to .restore must be stack pointer (sp)");
2978 parse_operand (&e2);
2979 if (e2.X_op != O_constant)
2981 as_bad ("Second operand to .restore must be constant");
2986 add_unwind_entry (output_epilogue (ecount));
2990 dot_restorereg (dummy)
2993 unsigned int ab, reg;
2998 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3000 as_bad ("First operand to .restorereg must be a preserved register");
3003 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3007 dot_restorereg_p (dummy)
3010 unsigned int qp, ab, reg;
3014 sep = parse_operand (&e1);
3017 as_bad ("No second operand to .restorereg.p");
3021 parse_operand (&e2);
3023 qp = e1.X_add_number - REG_P;
3024 if (e1.X_op != O_register || qp > 63)
3026 as_bad ("First operand to .restorereg.p must be a predicate");
3030 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3032 as_bad ("Second operand to .restorereg.p must be a preserved register");
3035 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3039 generate_unwind_image ()
3042 unsigned char *unw_rec;
3044 /* Force out pending instructions, to make sure all unwind records have
3045 a valid slot_number field. */
3046 ia64_flush_insns ();
3048 /* Generate the unwind record. */
3049 size = output_unw_records (unwind.list, (void **) &unw_rec);
3051 as_bad ("Unwind record is not a multiple of 8 bytes.");
3053 /* If there are unwind records, switch sections, and output the info. */
3056 unsigned char *where;
3058 set_section ((char *) special_section_name[SPECIAL_SECTION_UNWIND_INFO]);
3060 /* Make sure the section has 8 byte alignment. */
3061 record_alignment (now_seg, 3);
3063 /* Set expression which points to start of unwind descriptor area. */
3064 unwind.info = expr_build_dot ();
3066 where = (unsigned char *) frag_more (size);
3068 /* Issue a label for this address, and keep track of it to put it
3069 in the unwind section. */
3071 /* Copy the information from the unwind record into this section. The
3072 data is already in the correct byte order. */
3073 memcpy (where, unw_rec, size);
3074 /* Add the personality address to the image. */
3075 if (unwind.personality_routine != 0)
3077 exp.X_op = O_symbol;
3078 exp.X_add_symbol = unwind.personality_routine;
3079 exp.X_add_number = 0;
3080 fix_new_exp (frag_now, frag_now_fix () - 8, 8,
3081 &exp, 0, BFD_RELOC_IA64_LTOFF_FPTR64LSB);
3082 unwind.personality_routine = 0;
3084 obj_elf_previous (0);
3087 free_list_records (unwind.list);
3088 unwind.list = unwind.tail = unwind.current_entry = NULL;
3094 dot_handlerdata (dummy)
3097 generate_unwind_image ();
3098 demand_empty_rest_of_line ();
3102 dot_unwentry (dummy)
3105 demand_empty_rest_of_line ();
3116 reg = e.X_add_number - REG_BR;
3117 if (e.X_op == O_register && reg < 8)
3118 add_unwind_entry (output_rp_br (reg));
3120 as_bad ("First operand not a valid branch register");
3124 dot_savemem (psprel)
3131 sep = parse_operand (&e1);
3133 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3134 sep = parse_operand (&e2);
3136 reg1 = e1.X_add_number;
3137 val = e2.X_add_number;
3139 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3140 if (e1.X_op == O_register)
3142 if (e2.X_op == O_constant)
3146 case REG_AR + AR_BSP:
3147 add_unwind_entry (output_bsp_when ());
3148 add_unwind_entry ((psprel
3150 : output_bsp_sprel) (val));
3152 case REG_AR + AR_BSPSTORE:
3153 add_unwind_entry (output_bspstore_when ());
3154 add_unwind_entry ((psprel
3155 ? output_bspstore_psprel
3156 : output_bspstore_sprel) (val));
3158 case REG_AR + AR_RNAT:
3159 add_unwind_entry (output_rnat_when ());
3160 add_unwind_entry ((psprel
3161 ? output_rnat_psprel
3162 : output_rnat_sprel) (val));
3164 case REG_AR + AR_UNAT:
3165 add_unwind_entry (output_unat_when ());
3166 add_unwind_entry ((psprel
3167 ? output_unat_psprel
3168 : output_unat_sprel) (val));
3170 case REG_AR + AR_FPSR:
3171 add_unwind_entry (output_fpsr_when ());
3172 add_unwind_entry ((psprel
3173 ? output_fpsr_psprel
3174 : output_fpsr_sprel) (val));
3176 case REG_AR + AR_PFS:
3177 add_unwind_entry (output_pfs_when ());
3178 add_unwind_entry ((psprel
3180 : output_pfs_sprel) (val));
3182 case REG_AR + AR_LC:
3183 add_unwind_entry (output_lc_when ());
3184 add_unwind_entry ((psprel
3186 : output_lc_sprel) (val));
3189 add_unwind_entry (output_rp_when ());
3190 add_unwind_entry ((psprel
3192 : output_rp_sprel) (val));
3195 add_unwind_entry (output_preds_when ());
3196 add_unwind_entry ((psprel
3197 ? output_preds_psprel
3198 : output_preds_sprel) (val));
3201 add_unwind_entry (output_priunat_when_mem ());
3202 add_unwind_entry ((psprel
3203 ? output_priunat_psprel
3204 : output_priunat_sprel) (val));
3207 as_bad ("First operand not a valid register");
3211 as_bad ("Second operand not a valid constant");
3214 as_bad ("First operand not a register");
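/* For illustration (hypothetical operands):

	.savesp  ar.lc, 16
	.savepsp ar.lc, 16

   The first form records ar.lc as saved at an sp-relative offset of 16,
   the second uses the psp-relative encoding; each emits the matching
   *_when record plus the sprel or psprel variant selected by the psprel
   argument above.  */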
3223 sep = parse_operand (&e1);
3225 parse_operand (&e2);
3227 if (e1.X_op != O_constant)
3228 as_bad ("First operand to .save.g must be a constant.");
3231 int grmask = e1.X_add_number;
3233 add_unwind_entry (output_gr_mem (grmask));
3236 int reg = e2.X_add_number - REG_GR;
3237 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3238 add_unwind_entry (output_gr_gr (grmask, reg));
3240 as_bad ("Second operand is an invalid register.");
3251 sep = parse_operand (&e1);
3253 if (e1.X_op != O_constant)
3254 as_bad ("Operand to .save.f must be a constant.");
3256 add_unwind_entry (output_fr_mem (e1.X_add_number));
3268 sep = parse_operand (&e1);
3269 if (e1.X_op != O_constant)
3271 as_bad ("First operand to .save.b must be a constant.");
3274 brmask = e1.X_add_number;
3278 sep = parse_operand (&e2);
3279 reg = e2.X_add_number - REG_GR;
3280 if (e2.X_op != O_register || reg > 127)
3282 as_bad ("Second operand to .save.b must be a general register.");
3285 add_unwind_entry (output_br_gr (brmask, e2.X_add_number));
3288 add_unwind_entry (output_br_mem (brmask));
3290 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3291 ignore_rest_of_line ();
3300 sep = parse_operand (&e1);
3302 parse_operand (&e2);
3304 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3305 as_bad ("Both operands of .save.gf must be constants.");
3308 int grmask = e1.X_add_number;
3309 int frmask = e2.X_add_number;
3310 add_unwind_entry (output_frgr_mem (grmask, frmask));
3321 sep = parse_operand (&e);
3322 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3323 ignore_rest_of_line ();
3325 if (e.X_op != O_constant)
3326 as_bad ("Operand to .spill must be a constant");
3328 add_unwind_entry (output_spill_base (e.X_add_number));
3332 dot_spillreg (dummy)
3335 int sep, ab, xy, reg, treg;
3338 sep = parse_operand (&e1);
3341 as_bad ("No second operand to .spillreg");
3345 parse_operand (&e2);
3347 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3349 as_bad ("First operand to .spillreg must be a preserved register");
3353 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3355 as_bad ("Second operand to .spillreg must be a register");
3359 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3363 dot_spillmem (psprel)
3369 sep = parse_operand (&e1);
3372 as_bad ("Second operand missing");
3376 parse_operand (&e2);
3378 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3380 as_bad ("First operand to .spill%s must be a preserved register",
3381 psprel ? "psp" : "sp");
3385 if (e2.X_op != O_constant)
3387 as_bad ("Second operand to .spill%s must be a constant",
3388 psprel ? "psp" : "sp");
3393 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
3395 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
3399 dot_spillreg_p (dummy)
3402 int sep, ab, xy, reg, treg;
3403 expressionS e1, e2, e3;
3406 sep = parse_operand (&e1);
3409 as_bad ("No second and third operands to .spillreg.p");
3413 sep = parse_operand (&e2);
3416 as_bad ("No third operand to .spillreg.p");
3420 parse_operand (&e3);
3422 qp = e1.X_add_number - REG_P;
3424 if (e1.X_op != O_register || qp > 63)
3426 as_bad ("First operand to .spillreg.p must be a predicate");
3430 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3432 as_bad ("Second operand to .spillreg.p must be a preserved register");
3436 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
3438 as_bad ("Third operand to .spillreg.p must be a register");
3442 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
3446 dot_spillmem_p (psprel)
3449 expressionS e1, e2, e3;
3453 sep = parse_operand (&e1);
3456 as_bad ("Second operand missing");
3460 parse_operand (&e2);
3463 as_bad ("Second operand missing");
3467 parse_operand (&e3);
3469 qp = e1.X_add_number - REG_P;
3470 if (e1.X_op != O_register || qp > 63)
3472 as_bad ("First operand to .spill%s.p must be a predicate",
3473 psprel ? "psp" : "sp");
3477 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3479 as_bad ("Second operand to .spill%s.p must be a preserved register",
3480 psprel ? "psp" : "sp");
3484 if (e3.X_op != O_constant)
3486 as_bad ("Third operand to .spill%s.p must be a constant",
3487 psprel ? "psp" : "sp");
3492 add_unwind_entry (output_spill_psprel_p (qp, ab, reg, e3.X_add_number));
3494 add_unwind_entry (output_spill_sprel_p (qp, ab, reg, e3.X_add_number));
3498 dot_label_state (dummy)
3504 if (e.X_op != O_constant)
3506 as_bad ("Operand to .label_state must be a constant");
3509 add_unwind_entry (output_label_state (e.X_add_number));
3513 dot_copy_state (dummy)
3519 if (e.X_op != O_constant)
3521 as_bad ("Operand to .copy_state must be a constant");
3524 add_unwind_entry (output_copy_state (e.X_add_number));
3534 sep = parse_operand (&e1);
3537 as_bad ("Second operand to .unwabi missing");
3540 sep = parse_operand (&e2);
3541 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3542 ignore_rest_of_line ();
3544 if (e1.X_op != O_constant)
3546 as_bad ("First operand to .unwabi must be a constant");
3550 if (e2.X_op != O_constant)
3552 as_bad ("Second operand to .unwabi must be a constant");
3556 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
3560 dot_personality (dummy)
3565 name = input_line_pointer;
3566 c = get_symbol_end ();
3567 p = input_line_pointer;
3568 unwind.personality_routine = symbol_find_or_make (name);
3571 demand_empty_rest_of_line ();
3581 unwind.proc_start = expr_build_dot ();
3582 /* Parse names of main and alternate entry points and mark them as
3583 function symbols: */
3587 name = input_line_pointer;
3588 c = get_symbol_end ();
3589 p = input_line_pointer;
3590 sym = symbol_find_or_make (name);
3591 if (unwind.proc_start == 0)
3593 unwind.proc_start = sym;
3595 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
3598 if (*input_line_pointer != ',')
3600 ++input_line_pointer;
3602 demand_empty_rest_of_line ();
3605 unwind.list = unwind.tail = unwind.current_entry = NULL;
3606 unwind.personality_routine = 0;
3613 unwind.prologue = 0;
3614 unwind.prologue_mask = 0;
3616 add_unwind_entry (output_body ());
3617 demand_empty_rest_of_line ();
3621 dot_prologue (dummy)
3625 int mask = 0, grsave;
3627 if (!is_it_end_of_statement ())
3630 sep = parse_operand (&e1);
3632 as_bad ("No second operand to .prologue");
3633 sep = parse_operand (&e2);
3634 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3635 ignore_rest_of_line ();
3637 if (e1.X_op == O_constant)
3639 mask = e1.X_add_number;
3641 if (e2.X_op == O_constant)
3642 grsave = e2.X_add_number;
3643 else if (e2.X_op == O_register
3644 && (grsave = e2.X_add_number - REG_GR) < 128)
3647 as_bad ("Second operand not a constant or general register");
3649 add_unwind_entry (output_prologue_gr (mask, grsave));
3652 as_bad ("First operand not a constant");
3655 add_unwind_entry (output_prologue ());
3657 unwind.prologue = 1;
3658 unwind.prologue_mask = mask;
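/* Note on the mask operand, as interpreted by the .save and .vframe
   handlers above: bit 8 stands for rp, bit 4 for ar.pfs, bit 2 for psp
   and bit 1 for the predicates; registers covered by the mask are taken
   to be described by the prologue_gr record (saved in GRs starting at
   grsave), so their individual *_gr records are suppressed.  For
   example, ".prologue 12, 32" would cover rp and ar.pfs starting at r32
   (illustrative values).  */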
3667 int bytes_per_address;
3670 subsegT saved_subseg;
3672 saved_seg = now_seg;
3673 saved_subseg = now_subseg;
3676 demand_empty_rest_of_line ();
3678 insn_group_break (1, 0, 0);
3680 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
3681 if (unwind.info == 0)
3683 generate_unwind_image ();
3686 subseg_set (md.last_text_seg, 0);
3687 unwind.proc_end = expr_build_dot ();
3689 set_section ((char *) special_section_name[SPECIAL_SECTION_UNWIND]);
3691 /* Make sure the section has 8 byte alignment. */
3692 record_alignment (now_seg, 3);
3694 ptr = frag_more (24);
3695 where = frag_now_fix () - 24;
3696 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
3698 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
3699 e.X_op = O_pseudo_fixup;
3700 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3702 e.X_add_symbol = unwind.proc_start;
3703 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
3705 e.X_op = O_pseudo_fixup;
3706 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3708 e.X_add_symbol = unwind.proc_end;
3709 ia64_cons_fix_new (frag_now, where + bytes_per_address, bytes_per_address, &e);
3711 if (unwind.info != 0)
3713 e.X_op = O_pseudo_fixup;
3714 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3716 e.X_add_symbol = unwind.info;
3717 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2), bytes_per_address, &e);
3720 md_number_to_chars (ptr + (bytes_per_address * 2), 0, bytes_per_address);
3722 subseg_set (saved_seg, saved_subseg);
3723 unwind.proc_start = unwind.proc_end = unwind.info = 0;
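/* The 24 bytes emitted above form one entry of the unwind table section:
   three address-sized, segment-relative words giving the procedure start
   address, the procedure end address and, when unwind info was generated,
   the address of the unwind descriptor area; otherwise the third word is
   simply zeroed.  */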
3727 dot_template (template)
3730 CURR_SLOT.user_template = template;
3737 int ins, locs, outs, rots;
3739 if (is_it_end_of_statement ())
3740 ins = locs = outs = rots = 0;
3743 ins = get_absolute_expression ();
3744 if (*input_line_pointer++ != ',')
3746 locs = get_absolute_expression ();
3747 if (*input_line_pointer++ != ',')
3749 outs = get_absolute_expression ();
3750 if (*input_line_pointer++ != ',')
3752 rots = get_absolute_expression ();
3754 set_regstack (ins, locs, outs, rots);
3758 as_bad ("Comma expected");
3759 ignore_rest_of_line ();
3766 unsigned num_regs, num_alloced = 0;
3767 struct dynreg **drpp, *dr;
3768 int ch, base_reg = 0;
3774 case DYNREG_GR: base_reg = REG_GR + 32; break;
3775 case DYNREG_FR: base_reg = REG_FR + 32; break;
3776 case DYNREG_PR: base_reg = REG_P + 16; break;
3780 /* First, remove existing names from hash table. */
3781 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
3783 hash_delete (md.dynreg_hash, dr->name);
3787 drpp = &md.dynreg[type];
3790 start = input_line_pointer;
3791 ch = get_symbol_end ();
3792 *input_line_pointer = ch;
3793 len = (input_line_pointer - start);
3796 if (*input_line_pointer != '[')
3798 as_bad ("Expected '['");
3801 ++input_line_pointer; /* skip '[' */
3803 num_regs = get_absolute_expression ();
3805 if (*input_line_pointer++ != ']')
3807 as_bad ("Expected ']'");
3812 num_alloced += num_regs;
3816 if (num_alloced > md.rot.num_regs)
3818 as_bad ("Used more than the declared %d rotating registers",
3824 if (num_alloced > 96)
3826 as_bad ("Used more than the available 96 rotating registers");
3831 if (num_alloced > 48)
3833 as_bad ("Used more than the available 48 rotating registers");
3842 name = obstack_alloc (&notes, len + 1);
3843 memcpy (name, start, len);
3848 *drpp = obstack_alloc (&notes, sizeof (*dr));
3849 memset (*drpp, 0, sizeof (*dr));
3854 dr->num_regs = num_regs;
3855 dr->base = base_reg;
3857 base_reg += num_regs;
3859 if (hash_insert (md.dynreg_hash, name, dr))
3861 as_bad ("Attempt to redefine register set `%s'", name);
3865 if (*input_line_pointer != ',')
3867 ++input_line_pointer; /* skip comma */
3870 demand_empty_rest_of_line ();
3874 ignore_rest_of_line ();
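/* For illustration (hypothetical names), a declaration such as

	.rotr in[4], out[4]

   names two sets of rotating general registers allocated consecutively
   from r32; .rotf and .rotp behave the same way for the rotating FRs
   (from f32) and the rotating predicates (from p16).  The names go into
   the dynamic register hash table consulted while operands are parsed.  */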
3878 dot_byteorder (byteorder)
3881 target_big_endian = byteorder;
3893 option = input_line_pointer;
3894 ch = get_symbol_end ();
3895 if (strcmp (option, "lsb") == 0)
3896 md.flags &= ~EF_IA_64_BE;
3897 else if (strcmp (option, "msb") == 0)
3898 md.flags |= EF_IA_64_BE;
3899 else if (strcmp (option, "abi32") == 0)
3900 md.flags &= ~EF_IA_64_ABI64;
3901 else if (strcmp (option, "abi64") == 0)
3902 md.flags |= EF_IA_64_ABI64;
3904 as_bad ("Unknown psr option `%s'", option);
3905 *input_line_pointer = ch;
3908 if (*input_line_pointer != ',')
3911 ++input_line_pointer;
3914 demand_empty_rest_of_line ();
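/* For illustration (hypothetical use): ".psr abi64, msb" selects the
   64-bit ABI and big-endian output by setting the corresponding ELF
   header flags handled above, while "abi32" and "lsb" clear them.  */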
3921 as_bad (".alias not implemented yet");
3928 new_logical_line (0, get_absolute_expression ());
3929 demand_empty_rest_of_line ();
3933 parse_section_name ()
3939 if (*input_line_pointer != '"')
3941 as_bad ("Missing section name");
3942 ignore_rest_of_line ();
3945 name = demand_copy_C_string (&len);
3948 ignore_rest_of_line ();
3952 if (*input_line_pointer != ',')
3954 as_bad ("Comma expected after section name");
3955 ignore_rest_of_line ();
3958 ++input_line_pointer; /* skip comma */
3966 char *name = parse_section_name ();
3972 obj_elf_previous (0);
3975 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
3978 stmt_float_cons (kind)
3985 case 'd': size = 8; break;
3986 case 'x': size = 10; break;
3993 ia64_do_align (size);
4001 int saved_auto_align = md.auto_align;
4005 md.auto_align = saved_auto_align;
4009 dot_xfloat_cons (kind)
4012 char *name = parse_section_name ();
4017 stmt_float_cons (kind);
4018 obj_elf_previous (0);
4022 dot_xstringer (zero)
4025 char *name = parse_section_name ();
4031 obj_elf_previous (0);
4038 int saved_auto_align = md.auto_align;
4039 char *name = parse_section_name ();
4046 md.auto_align = saved_auto_align;
4047 obj_elf_previous (0);
4051 dot_xfloat_cons_ua (kind)
4054 int saved_auto_align = md.auto_align;
4055 char *name = parse_section_name ();
4061 stmt_float_cons (kind);
4062 md.auto_align = saved_auto_align;
4063 obj_elf_previous (0);
4066 /* .reg.val <regname>,value */
4075 if (reg.X_op != O_register)
4077 as_bad (_("Register name expected"));
4078 ignore_rest_of_line ();
4080 else if (*input_line_pointer++ != ',')
4082 as_bad (_("Comma expected"));
4083 ignore_rest_of_line ();
4087 valueT value = get_absolute_expression ();
4088 int regno = reg.X_add_number;
4089 if (regno < REG_GR || regno >= REG_GR + 128)
4090 as_warn (_("Register value annotation ignored"));
4093 gr_values[regno - REG_GR].known = 1;
4094 gr_values[regno - REG_GR].value = value;
4095 gr_values[regno - REG_GR].path = md.path;
4098 demand_empty_rest_of_line ();
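/* For illustration (hypothetical values): ".reg.val r13, 0" tells the
   dependency-violation checker that r13 is known to contain zero on the
   current path; annotations on anything other than a general register
   are ignored with a warning, as above.  */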
4101 /* select dv checking mode
4106 A stop is inserted when changing modes
4113 if (md.manual_bundling)
4114 as_warn (_("Directive invalid within a bundle"));
4116 if (type == 'E' || type == 'A')
4117 md.mode_explicitly_set = 0;
4119 md.mode_explicitly_set = 1;
4126 if (md.explicit_mode)
4127 insn_group_break (1, 0, 0);
4128 md.explicit_mode = 0;
4132 if (!md.explicit_mode)
4133 insn_group_break (1, 0, 0);
4134 md.explicit_mode = 1;
4138 if (md.explicit_mode != md.default_explicit_mode)
4139 insn_group_break (1, 0, 0);
4140 md.explicit_mode = md.default_explicit_mode;
4141 md.mode_explicitly_set = 0;
4152 for (regno = 0; regno < 64; regno++)
4154 if (mask & ((valueT) 1 << regno))
4156 fprintf (stderr, "%s p%d", comma, regno);
4163 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear")
4164 .pred.rel.imply p1, p2 (also .pred.rel "imply")
4165 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex")
4166 .pred.safe_across_calls p1 [, p2 [,...]]
4175 int p1 = -1, p2 = -1;
4179 if (*input_line_pointer != '"')
4181 as_bad (_("Missing predicate relation type"));
4182 ignore_rest_of_line ();
4188 char *form = demand_copy_C_string (&len);
4189 if (strcmp (form, "mutex") == 0)
4191 else if (strcmp (form, "clear") == 0)
4193 else if (strcmp (form, "imply") == 0)
4197 as_bad (_("Unrecognized predicate relation type"));
4198 ignore_rest_of_line ();
4202 if (*input_line_pointer == ',')
4203 ++input_line_pointer;
4213 if (toupper (*input_line_pointer) != 'P'
4214 || (regno = atoi (++input_line_pointer)) < 0
4217 as_bad (_("Predicate register expected"));
4218 ignore_rest_of_line ();
4221 while (isdigit (*input_line_pointer))
4222 ++input_line_pointer;
4229 as_warn (_("Duplicate predicate register ignored"));
4232 /* See if it's a range. */
4233 if (*input_line_pointer == '-')
4236 ++input_line_pointer;
4238 if (toupper (*input_line_pointer) != 'P'
4239 || (regno = atoi (++input_line_pointer)) < 0
4242 as_bad (_("Predicate register expected"));
4243 ignore_rest_of_line ();
4246 while (isdigit (*input_line_pointer))
4247 ++input_line_pointer;
4251 as_bad (_("Bad register range"));
4252 ignore_rest_of_line ();
4263 if (*input_line_pointer != ',')
4265 ++input_line_pointer;
4274 clear_qp_mutex (mask);
4275 clear_qp_implies (mask, (valueT) 0);
4278 if (count != 2 || p1 == -1 || p2 == -1)
4279 as_bad (_("Predicate source and target required"));
4280 else if (p1 == 0 || p2 == 0)
4281 as_bad (_("Use of p0 is not valid in this context"));
4283 add_qp_imply (p1, p2);
4288 as_bad (_("At least two PR arguments expected"));
4293 as_bad (_("Use of p0 is not valid in this context"));
4296 add_qp_mutex (mask);
4299 /* note that we don't override any existing relations */
4302 as_bad (_("At least one PR argument expected"));
4307 fprintf (stderr, "Safe across calls: ");
4308 print_prmask (mask);
4309 fprintf (stderr, "\n");
4311 qp_safe_across_calls = mask;
4314 demand_empty_rest_of_line ();
4317 /* .entry label [, label [, ...]]
4318 Hint to DV code that the given labels are to be considered entry points.
4319 Otherwise, only global labels are considered entry points. */
4332 name = input_line_pointer;
4333 c = get_symbol_end ();
4334 symbolP = symbol_find_or_make (name);
4336 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
4338 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
4341 *input_line_pointer = c;
4343 c = *input_line_pointer;
4346 input_line_pointer++;
4348 if (*input_line_pointer == '\n')
4354 demand_empty_rest_of_line ();
4357 /* .mem.offset offset, base
4358 "base" is used to distinguish between offsets from a different base. */
4361 dot_mem_offset (dummy)
4364 md.mem_offset.hint = 1;
4365 md.mem_offset.offset = get_absolute_expression ();
4366 if (*input_line_pointer != ',')
4368 as_bad (_("Comma expected"));
4369 ignore_rest_of_line ();
4372 ++input_line_pointer;
4373 md.mem_offset.base = get_absolute_expression ();
4374 demand_empty_rest_of_line ();
4377 /* ia64-specific pseudo-ops: */
4378 const pseudo_typeS md_pseudo_table[] =
4380 { "radix", dot_radix, 0 },
4381 { "lcomm", s_lcomm_bytes, 1 },
4382 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
4383 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
4384 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
4385 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
4386 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
4387 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
4388 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
4389 { "proc", dot_proc, 0 },
4390 { "body", dot_body, 0 },
4391 { "prologue", dot_prologue, 0 },
4392 { "endp", dot_endp },
4393 { "file", dwarf2_directive_file },
4394 { "loc", dwarf2_directive_loc },
4396 { "fframe", dot_fframe },
4397 { "vframe", dot_vframe },
4398 { "vframesp", dot_vframesp },
4399 { "vframepsp", dot_vframepsp },
4400 { "save", dot_save },
4401 { "restore", dot_restore },
4402 { "restorereg", dot_restorereg },
4403 { "restorereg.p", dot_restorereg_p },
4404 { "handlerdata", dot_handlerdata },
4405 { "unwentry", dot_unwentry },
4406 { "altrp", dot_altrp },
4407 { "savesp", dot_savemem, 0 },
4408 { "savepsp", dot_savemem, 1 },
4409 { "save.g", dot_saveg },
4410 { "save.f", dot_savef },
4411 { "save.b", dot_saveb },
4412 { "save.gf", dot_savegf },
4413 { "spill", dot_spill },
4414 { "spillreg", dot_spillreg },
4415 { "spillsp", dot_spillmem, 0 },
4416 { "spillpsp", dot_spillmem, 1 },
4417 { "spillreg.p", dot_spillreg_p },
4418 { "spillsp.p", dot_spillmem_p, 0 },
4419 { "spillpsp.p", dot_spillmem_p, 1 },
4420 { "label_state", dot_label_state },
4421 { "copy_state", dot_copy_state },
4422 { "unwabi", dot_unwabi },
4423 { "personality", dot_personality },
4425 { "estate", dot_estate },
4427 { "mii", dot_template, 0x0 },
4428 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
4429 { "mlx", dot_template, 0x2 },
4430 { "mmi", dot_template, 0x4 },
4431 { "mfi", dot_template, 0x6 },
4432 { "mmf", dot_template, 0x7 },
4433 { "mib", dot_template, 0x8 },
4434 { "mbb", dot_template, 0x9 },
4435 { "bbb", dot_template, 0xb },
4436 { "mmb", dot_template, 0xc },
4437 { "mfb", dot_template, 0xe },
4439 { "lb", dot_scope, 0 },
4440 { "le", dot_scope, 1 },
4442 { "align", s_align_bytes, 0 },
4443 { "regstk", dot_regstk, 0 },
4444 { "rotr", dot_rot, DYNREG_GR },
4445 { "rotf", dot_rot, DYNREG_FR },
4446 { "rotp", dot_rot, DYNREG_PR },
4447 { "lsb", dot_byteorder, 0 },
4448 { "msb", dot_byteorder, 1 },
4449 { "psr", dot_psr, 0 },
4450 { "alias", dot_alias, 0 },
4451 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
4453 { "xdata1", dot_xdata, 1 },
4454 { "xdata2", dot_xdata, 2 },
4455 { "xdata4", dot_xdata, 4 },
4456 { "xdata8", dot_xdata, 8 },
4457 { "xreal4", dot_xfloat_cons, 'f' },
4458 { "xreal8", dot_xfloat_cons, 'd' },
4459 { "xreal10", dot_xfloat_cons, 'x' },
4460 { "xstring", dot_xstringer, 0 },
4461 { "xstringz", dot_xstringer, 1 },
4463 /* unaligned versions: */
4464 { "xdata2.ua", dot_xdata_ua, 2 },
4465 { "xdata4.ua", dot_xdata_ua, 4 },
4466 { "xdata8.ua", dot_xdata_ua, 8 },
4467 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
4468 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
4469 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
4471 /* annotations/DV checking support */
4472 { "entry", dot_entry, 0 },
4473 { "mem.offset", dot_mem_offset },
4474 { "pred.rel", dot_pred_rel, 0 },
4475 { "pred.rel.clear", dot_pred_rel, 'c' },
4476 { "pred.rel.imply", dot_pred_rel, 'i' },
4477 { "pred.rel.mutex", dot_pred_rel, 'm' },
4478 { "pred.safe_across_calls", dot_pred_rel, 's' },
4479 { "reg.val", dot_reg_val },
4480 { "auto", dot_dv_mode, 'a' },
4481 { "explicit", dot_dv_mode, 'e' },
4482 { "default", dot_dv_mode, 'd' },
4487 static const struct pseudo_opcode
4490 void (*handler) (int);
4495 /* these are more like pseudo-ops, but don't start with a dot */
4496 { "data1", cons, 1 },
4497 { "data2", cons, 2 },
4498 { "data4", cons, 4 },
4499 { "data8", cons, 8 },
4500 { "real4", stmt_float_cons, 'f' },
4501 { "real8", stmt_float_cons, 'd' },
4502 { "real10", stmt_float_cons, 'x' },
4503 { "string", stringer, 0 },
4504 { "stringz", stringer, 1 },
4506 /* unaligned versions: */
4507 { "data2.ua", stmt_cons_ua, 2 },
4508 { "data4.ua", stmt_cons_ua, 4 },
4509 { "data8.ua", stmt_cons_ua, 8 },
4510 { "real4.ua", float_cons, 'f' },
4511 { "real8.ua", float_cons, 'd' },
4512 { "real10.ua", float_cons, 'x' },
4515 /* Declare a register by creating a symbol for it and entering it in
4516 the symbol table. */
4519 declare_register (name, regnum)
4526 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
4528 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
4530 as_fatal ("Inserting \"%s\" into register table failed: %s",
4537 declare_register_set (prefix, num_regs, base_regnum)
4545 for (i = 0; i < num_regs; ++i)
4547 sprintf (name, "%s%u", prefix, i);
4548 declare_register (name, base_regnum + i);
4553 operand_width (opnd)
4554 enum ia64_opnd opnd;
4556 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
4557 unsigned int bits = 0;
4561 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
4562 bits += odesc->field[i].bits;
4568 operand_match (idesc, index, e)
4569 const struct ia64_opcode *idesc;
4573 enum ia64_opnd opnd = idesc->operands[index];
4574 int bits, relocatable = 0;
4575 struct insn_fix *fix;
4582 case IA64_OPND_AR_CCV:
4583 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
4587 case IA64_OPND_AR_PFS:
4588 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
4593 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
4598 if (e->X_op == O_register && e->X_add_number == REG_IP)
4603 if (e->X_op == O_register && e->X_add_number == REG_PR)
4607 case IA64_OPND_PR_ROT:
4608 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
4613 if (e->X_op == O_register && e->X_add_number == REG_PSR)
4617 case IA64_OPND_PSR_L:
4618 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
4622 case IA64_OPND_PSR_UM:
4623 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
4628 if (e->X_op == O_constant && e->X_add_number == 1)
4633 if (e->X_op == O_constant && e->X_add_number == 8)
4638 if (e->X_op == O_constant && e->X_add_number == 16)
4642 /* register operands: */
4645 if (e->X_op == O_register && e->X_add_number >= REG_AR
4646 && e->X_add_number < REG_AR + 128)
4652 if (e->X_op == O_register && e->X_add_number >= REG_BR
4653 && e->X_add_number < REG_BR + 8)
4658 if (e->X_op == O_register && e->X_add_number >= REG_CR
4659 && e->X_add_number < REG_CR + 128)
4667 if (e->X_op == O_register && e->X_add_number >= REG_FR
4668 && e->X_add_number < REG_FR + 128)
4674 if (e->X_op == O_register && e->X_add_number >= REG_P
4675 && e->X_add_number < REG_P + 64)
4682 if (e->X_op == O_register && e->X_add_number >= REG_GR
4683 && e->X_add_number < REG_GR + 128)
4687 case IA64_OPND_R3_2:
4688 if (e->X_op == O_register && e->X_add_number >= REG_GR
4689 && e->X_add_number < REG_GR + 4)
4693 /* indirect operands: */
4694 case IA64_OPND_CPUID_R3:
4695 case IA64_OPND_DBR_R3:
4696 case IA64_OPND_DTR_R3:
4697 case IA64_OPND_ITR_R3:
4698 case IA64_OPND_IBR_R3:
4699 case IA64_OPND_MSR_R3:
4700 case IA64_OPND_PKR_R3:
4701 case IA64_OPND_PMC_R3:
4702 case IA64_OPND_PMD_R3:
4703 case IA64_OPND_RR_R3:
4704 if (e->X_op == O_index && e->X_op_symbol
4705 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
4706 == opnd - IA64_OPND_CPUID_R3))
4711 if (e->X_op == O_index && !e->X_op_symbol)
4715 /* immediate operands: */
4716 case IA64_OPND_CNT2a:
4717 case IA64_OPND_LEN4:
4718 case IA64_OPND_LEN6:
4719 bits = operand_width (idesc->operands[index]);
4720 if (e->X_op == O_constant
4721 && (bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
4725 case IA64_OPND_CNT2b:
4726 if (e->X_op == O_constant
4727 && (bfd_vma) (e->X_add_number - 1) < 3)
4731 case IA64_OPND_CNT2c:
4732 val = e->X_add_number;
4733 if (e->X_op == O_constant
4734 && (val == 0 || val == 7 || val == 15 || val == 16))
4739 /* SOR must be an integer multiple of 8 */
4740 if (e->X_add_number & 0x7)
4744 if (e->X_op == O_constant &&
4745 (bfd_vma) e->X_add_number <= 96)
4749 case IA64_OPND_IMMU62:
4750 if (e->X_op == O_constant)
4752 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
4757 /* FIXME -- need 62-bit relocation type */
4758 as_bad (_("62-bit relocation not yet implemented"));
4762 case IA64_OPND_IMMU64:
4763 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
4764 || e->X_op == O_subtract)
4766 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
4767 fix->code = BFD_RELOC_IA64_IMM64;
4768 if (e->X_op != O_subtract)
4770 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
4771 if (e->X_op == O_pseudo_fixup)
4775 fix->opnd = idesc->operands[index];
4778 ++CURR_SLOT.num_fixups;
4781 else if (e->X_op == O_constant)
4785 case IA64_OPND_CCNT5:
4786 case IA64_OPND_CNT5:
4787 case IA64_OPND_CNT6:
4788 case IA64_OPND_CPOS6a:
4789 case IA64_OPND_CPOS6b:
4790 case IA64_OPND_CPOS6c:
4791 case IA64_OPND_IMMU2:
4792 case IA64_OPND_IMMU7a:
4793 case IA64_OPND_IMMU7b:
4794 case IA64_OPND_IMMU21:
4795 case IA64_OPND_IMMU24:
4796 case IA64_OPND_MBTYPE4:
4797 case IA64_OPND_MHTYPE8:
4798 case IA64_OPND_POS6:
4799 bits = operand_width (idesc->operands[index]);
4800 if (e->X_op == O_constant
4801 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
4805 case IA64_OPND_IMMU9:
4806 bits = operand_width (idesc->operands[index]);
4807 if (e->X_op == O_constant
4808 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
4810 int lobits = e->X_add_number & 0x3;
4811 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
4812 e->X_add_number |= (bfd_vma) 0x3;
4817 case IA64_OPND_IMM44:
4818 /* least significant 16 bits must be zero */
4819 if ((e->X_add_number & 0xffff) != 0)
4820 as_warn (_("lower 16 bits of mask ignored"));
4822 if (e->X_op == O_constant
4823 && ((e->X_add_number >= 0
4824 && e->X_add_number < ((bfd_vma) 1 << 44))
4825 || (e->X_add_number < 0
4826 && -e->X_add_number <= ((bfd_vma) 1 << 44))))
4829 if (e->X_add_number >= 0
4830 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
4832 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
4838 case IA64_OPND_IMM17:
4839 /* bit 0 is a don't care (pr0 is hardwired to 1) */
4840 if (e->X_op == O_constant
4841 && ((e->X_add_number >= 0
4842 && e->X_add_number < ((bfd_vma) 1 << 17))
4843 || (e->X_add_number < 0
4844 && -e->X_add_number <= ((bfd_vma) 1 << 17))))
4847 if (e->X_add_number >= 0
4848 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
4850 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
4856 case IA64_OPND_IMM14:
4857 case IA64_OPND_IMM22:
4859 case IA64_OPND_IMM1:
4860 case IA64_OPND_IMM8:
4861 case IA64_OPND_IMM8U4:
4862 case IA64_OPND_IMM8M1:
4863 case IA64_OPND_IMM8M1U4:
4864 case IA64_OPND_IMM8M1U8:
4865 case IA64_OPND_IMM9a:
4866 case IA64_OPND_IMM9b:
4867 bits = operand_width (idesc->operands[index]);
4868 if (relocatable && (e->X_op == O_symbol
4869 || e->X_op == O_subtract
4870 || e->X_op == O_pseudo_fixup))
4872 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
4874 if (idesc->operands[index] == IA64_OPND_IMM14)
4875 fix->code = BFD_RELOC_IA64_IMM14;
4877 fix->code = BFD_RELOC_IA64_IMM22;
4879 if (e->X_op != O_subtract)
4881 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
4882 if (e->X_op == O_pseudo_fixup)
4886 fix->opnd = idesc->operands[index];
4889 ++CURR_SLOT.num_fixups;
4892 else if (e->X_op != O_constant
4893 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
4896 if (opnd == IA64_OPND_IMM8M1U4)
4898 /* Zero is not valid for unsigned compares that take an adjusted
4899 constant immediate range. */
4900 if (e->X_add_number == 0)
4903 /* Sign-extend 32-bit unsigned numbers, so that the following range
4904 checks will work. */
4905 val = e->X_add_number;
4906 if (((val & (~(bfd_vma) 0 << 32)) == 0)
4907 && ((val & ((bfd_vma) 1 << 31)) != 0))
4908 val = ((val << 32) >> 32);
4910 /* Check for 0x100000000. This is valid because
4911 0x100000000-1 is the same as ((uint32_t) -1). */
4912 if (val == ((bfd_signed_vma) 1 << 32))
4917 else if (opnd == IA64_OPND_IMM8M1U8)
4919 /* Zero is not valid for unsigned compares that take an adjusted
4920 constant immediate range. */
4921 if (e->X_add_number == 0)
4924 /* Check for 0x10000000000000000. */
4925 if (e->X_op == O_big)
4927 if (generic_bignum[0] == 0
4928 && generic_bignum[1] == 0
4929 && generic_bignum[2] == 0
4930 && generic_bignum[3] == 0
4931 && generic_bignum[4] == 1)
4937 val = e->X_add_number - 1;
4939 else if (opnd == IA64_OPND_IMM8M1)
4940 val = e->X_add_number - 1;
4941 else if (opnd == IA64_OPND_IMM8U4)
4943 /* Sign-extend 32-bit unsigned numbers, so that the following range
4944 checks will work. */
4945 val = e->X_add_number;
4946 if (((val & (~(bfd_vma) 0 << 32)) == 0)
4947 && ((val & ((bfd_vma) 1 << 31)) != 0))
4948 val = ((val << 32) >> 32);
4951 val = e->X_add_number;
4953 if ((val >= 0 && val < ((bfd_vma) 1 << (bits - 1)))
4954 || (val < 0 && -val <= ((bfd_vma) 1 << (bits - 1))))
4958 case IA64_OPND_INC3:
4959 /* +/- 1, 4, 8, 16 */
4960 val = e->X_add_number;
4963 if (e->X_op == O_constant
4964 && (val == 1 || val == 4 || val == 8 || val == 16))
4968 case IA64_OPND_TGT25:
4969 case IA64_OPND_TGT25b:
4970 case IA64_OPND_TGT25c:
4971 case IA64_OPND_TGT64:
4972 if (e->X_op == O_symbol)
4974 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
4975 if (opnd == IA64_OPND_TGT25)
4976 fix->code = BFD_RELOC_IA64_PCREL21F;
4977 else if (opnd == IA64_OPND_TGT25b)
4978 fix->code = BFD_RELOC_IA64_PCREL21M;
4979 else if (opnd == IA64_OPND_TGT25c)
4980 fix->code = BFD_RELOC_IA64_PCREL21B;
4981 else if (opnd == IA64_OPND_TGT64)
4982 fix->code = BFD_RELOC_IA64_PCREL60B;
4986 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
4987 fix->opnd = idesc->operands[index];
4990 ++CURR_SLOT.num_fixups;
4993 case IA64_OPND_TAG13:
4994 case IA64_OPND_TAG13b:
5001 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5002 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, 0);
5003 fix->opnd = idesc->operands[index];
5006 ++CURR_SLOT.num_fixups;
5026 memset (e, 0, sizeof (*e));
5029 if (*input_line_pointer != '}')
5031 sep = *input_line_pointer++;
5035 if (!md.manual_bundling)
5036 as_warn ("Found '}' when manual bundling is off");
5038 CURR_SLOT.manual_bundling_off = 1;
5039 md.manual_bundling = 0;
5045 /* Returns the next entry in the opcode table that matches the one in
5046 IDESC, and frees the entry in IDESC. If no matching entry is
5047 found, NULL is returned instead. */
5049 static struct ia64_opcode *
5050 get_next_opcode (struct ia64_opcode *idesc)
5052 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5053 ia64_free_opcode (idesc);
5057 /* Parse the operands for the opcode and find the opcode variant that
5058 matches the specified operands, or NULL if no match is possible. */
5060 static struct ia64_opcode *
5061 parse_operands (idesc)
5062 struct ia64_opcode *idesc;
5064 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5066 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5068 char *first_arg = 0, *end, *saved_input_pointer;
5071 assert (strlen (idesc->name) <= 128);
5073 strcpy (mnemonic, idesc->name);
5074 if (idesc->operands[2] == IA64_OPND_SOF)
5076 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5077 can't parse the first operand until we have parsed the
5078 remaining operands of the "alloc" instruction. */
5080 first_arg = input_line_pointer;
5081 end = strchr (input_line_pointer, '=');
5084 as_bad ("Expected separator `='");
5087 input_line_pointer = end + 1;
5092 for (; i < NELEMS (CURR_SLOT.opnd); ++i)
5094 sep = parse_operand (CURR_SLOT.opnd + i);
5095 if (CURR_SLOT.opnd[i].X_op == O_absent)
5100 if (sep != '=' && sep != ',')
5105 if (num_outputs > 0)
5106 as_bad ("Duplicate equal sign (=) in instruction");
5108 num_outputs = i + 1;
5113 as_bad ("Illegal operand separator `%c'", sep);
5117 if (idesc->operands[2] == IA64_OPND_SOF)
5119 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
5120 know (strcmp (idesc->name, "alloc") == 0);
5121 if (num_operands == 5 /* first_arg not included in this count! */
5122 && CURR_SLOT.opnd[2].X_op == O_constant
5123 && CURR_SLOT.opnd[3].X_op == O_constant
5124 && CURR_SLOT.opnd[4].X_op == O_constant
5125 && CURR_SLOT.opnd[5].X_op == O_constant)
5127 sof = set_regstack (CURR_SLOT.opnd[2].X_add_number,
5128 CURR_SLOT.opnd[3].X_add_number,
5129 CURR_SLOT.opnd[4].X_add_number,
5130 CURR_SLOT.opnd[5].X_add_number);
5132 /* now we can parse the first arg: */
5133 saved_input_pointer = input_line_pointer;
5134 input_line_pointer = first_arg;
5135 sep = parse_operand (CURR_SLOT.opnd + 0);
5137 --num_outputs; /* force error */
5138 input_line_pointer = saved_input_pointer;
5140 CURR_SLOT.opnd[2].X_add_number = sof;
5141 CURR_SLOT.opnd[3].X_add_number
5142 = sof - CURR_SLOT.opnd[4].X_add_number;
5143 CURR_SLOT.opnd[4] = CURR_SLOT.opnd[5];
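/* Worked example of the mapping above (illustrative values): for
   "alloc r35=ar.pfs,2,3,4,0" the constants i=2, l=3, o=4, r=0 are
   rewritten so that operand 2 becomes sof = 9 (i+l+o), operand 3
   becomes sol = 5 (i+l) and operand 4 becomes the rotating count 0,
   matching the operand layout of the real opcode.  */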
5147 highest_unmatched_operand = 0;
5148 expected_operand = idesc->operands[0];
5149 for (; idesc; idesc = get_next_opcode (idesc))
5151 if (num_outputs != idesc->num_outputs)
5152 continue; /* mismatch in # of outputs */
5154 CURR_SLOT.num_fixups = 0;
5155 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5156 if (!operand_match (idesc, i, CURR_SLOT.opnd + i))
5159 if (i != num_operands)
5161 if (i > highest_unmatched_operand)
5163 highest_unmatched_operand = i;
5164 expected_operand = idesc->operands[i];
5169 if (num_operands < NELEMS (idesc->operands)
5170 && idesc->operands[num_operands])
5171 continue; /* mismatch in number of arguments */
5177 if (expected_operand)
5178 as_bad ("Operand %u of `%s' should be %s",
5179 highest_unmatched_operand + 1, mnemonic,
5180 elf64_ia64_operands[expected_operand].desc);
5182 as_bad ("Operand mismatch");
5188 /* Keep track of state necessary to determine whether a NOP is necessary
5189 to avoid an erratum in A and B step Itanium chips, and return 1 if we
5190 detect a case where additional NOPs may be necessary. */
5192 errata_nop_necessary_p (slot, insn_unit)
5194 enum ia64_unit insn_unit;
5197 struct group *this_group = md.last_groups + md.group_idx;
5198 struct group *prev_group = md.last_groups + (md.group_idx + 2) % 3;
5199 struct ia64_opcode *idesc = slot->idesc;
5201 /* Test whether this could be the first insn in a problematic sequence. */
5202 if (insn_unit == IA64_UNIT_F)
5204 for (i = 0; i < idesc->num_outputs; i++)
5205 if (idesc->operands[i] == IA64_OPND_P1
5206 || idesc->operands[i] == IA64_OPND_P2)
5208 int regno = slot->opnd[i].X_add_number - REG_P;
5211 this_group->p_reg_set[regno] = 1;
5215 /* Test whether this could be the second insn in a problematic sequence. */
5216 if (insn_unit == IA64_UNIT_M && slot->qp_regno > 0
5217 && prev_group->p_reg_set[slot->qp_regno])
5219 for (i = 0; i < idesc->num_outputs; i++)
5220 if (idesc->operands[i] == IA64_OPND_R1
5221 || idesc->operands[i] == IA64_OPND_R2
5222 || idesc->operands[i] == IA64_OPND_R3)
5224 int regno = slot->opnd[i].X_add_number - REG_GR;
5227 if (strncmp (idesc->name, "add", 3) != 0
5228 && strncmp (idesc->name, "sub", 3) != 0
5229 && strncmp (idesc->name, "shladd", 6) != 0
5230 && (idesc->flags & IA64_OPCODE_POSTINC) == 0)
5231 this_group->g_reg_set_conditionally[regno] = 1;
5235 /* Test whether this could be the third insn in a problematic sequence. */
5236 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; i++)
5238 if (/* For fc, ptc, ptr, tak, thash, tpa, ttag, probe. */
5239 idesc->operands[i] == IA64_OPND_R3
5240 /* For mov indirect. */
5241 || idesc->operands[i] == IA64_OPND_RR_R3
5242 || idesc->operands[i] == IA64_OPND_DBR_R3
5243 || idesc->operands[i] == IA64_OPND_IBR_R3
5244 || idesc->operands[i] == IA64_OPND_PKR_R3
5245 || idesc->operands[i] == IA64_OPND_PMC_R3
5246 || idesc->operands[i] == IA64_OPND_PMD_R3
5247 || idesc->operands[i] == IA64_OPND_MSR_R3
5248 || idesc->operands[i] == IA64_OPND_CPUID_R3
5250 || idesc->operands[i] == IA64_OPND_ITR_R3
5251 || idesc->operands[i] == IA64_OPND_DTR_R3
5252 /* Normal memory addresses (load, store, xchg, cmpxchg, etc.). */
5253 || idesc->operands[i] == IA64_OPND_MR3)
5255 int regno = slot->opnd[i].X_add_number - REG_GR;
5258 if (idesc->operands[i] == IA64_OPND_R3)
5260 if (strcmp (idesc->name, "fc") != 0
5261 && strcmp (idesc->name, "tak") != 0
5262 && strcmp (idesc->name, "thash") != 0
5263 && strcmp (idesc->name, "tpa") != 0
5264 && strcmp (idesc->name, "ttag") != 0
5265 && strncmp (idesc->name, "ptr", 3) != 0
5266 && strncmp (idesc->name, "ptc", 3) != 0
5267 && strncmp (idesc->name, "probe", 5) != 0)
5270 if (prev_group->g_reg_set_conditionally[regno])
5278 build_insn (slot, insnp)
5282 const struct ia64_operand *odesc, *o2desc;
5283 struct ia64_opcode *idesc = slot->idesc;
5284 bfd_signed_vma insn, val;
5288 insn = idesc->opcode | slot->qp_regno;
5290 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
5292 if (slot->opnd[i].X_op == O_register
5293 || slot->opnd[i].X_op == O_constant
5294 || slot->opnd[i].X_op == O_index)
5295 val = slot->opnd[i].X_add_number;
5296 else if (slot->opnd[i].X_op == O_big)
5298 /* This must be the value 0x10000000000000000. */
5299 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
5305 switch (idesc->operands[i])
5307 case IA64_OPND_IMMU64:
5308 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
5309 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
5310 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
5311 | (((val >> 63) & 0x1) << 36));
5314 case IA64_OPND_IMMU62:
5315 val &= 0x3fffffffffffffffULL;
5316 if (val != slot->opnd[i].X_add_number)
5317 as_warn (_("Value truncated to 62 bits"));
5318 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
5319 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
5322 case IA64_OPND_TGT64:
5324 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
5325 insn |= ((((val >> 59) & 0x1) << 36)
5326 | (((val >> 0) & 0xfffff) << 13));
5357 case IA64_OPND_R3_2:
5358 case IA64_OPND_CPUID_R3:
5359 case IA64_OPND_DBR_R3:
5360 case IA64_OPND_DTR_R3:
5361 case IA64_OPND_ITR_R3:
5362 case IA64_OPND_IBR_R3:
5364 case IA64_OPND_MSR_R3:
5365 case IA64_OPND_PKR_R3:
5366 case IA64_OPND_PMC_R3:
5367 case IA64_OPND_PMD_R3:
5368 case IA64_OPND_RR_R3:
5376 odesc = elf64_ia64_operands + idesc->operands[i];
5377 err = (*odesc->insert) (odesc, val, &insn);
5379 as_bad_where (slot->src_file, slot->src_line,
5380 "Bad operand value: %s", err);
5381 if (idesc->flags & IA64_OPCODE_PSEUDO)
5383 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
5384 && odesc == elf64_ia64_operands + IA64_OPND_F3)
5386 o2desc = elf64_ia64_operands + IA64_OPND_F2;
5387 (*o2desc->insert) (o2desc, val, &insn);
5389 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
5390 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
5391 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
5393 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
5394 (*o2desc->insert) (o2desc, 64 - val, &insn);
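/* The two IA64_OPCODE_PSEUDO cases above just mirror one operand field
   into another: F2_EQ_F3 copies the F3 value into F2, and LEN_EQ_64MCNT
   fills the LEN6 field with 64 minus the count written to CPOS6a/POS6
   (presumably so that shift-style pseudo-ops, which only specify a
   count, end up with a consistent deposit/extract encoding).  */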
5404 unsigned int manual_bundling_on = 0, manual_bundling_off = 0;
5405 unsigned int manual_bundling = 0;
5406 enum ia64_unit required_unit, insn_unit = 0;
5407 enum ia64_insn_type type[3], insn_type;
5408 unsigned int template, orig_template;
5409 bfd_vma insn[3] = { -1, -1, -1 };
5410 struct ia64_opcode *idesc;
5411 int end_of_insn_group = 0, user_template = -1;
5412 int n, i, j, first, curr;
5413 unw_rec_list *ptr, *prev;
5414 bfd_vma t0 = 0, t1 = 0;
5415 struct label_fix *lfix;
5416 struct insn_fix *ifix;
5421 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
5422 know (first >= 0 && first < NUM_SLOTS);
5423 n = MIN (3, md.num_slots_in_use);
5425 /* Determine template: use user_template if specified, best match
5428 if (md.slot[first].user_template >= 0)
5429 user_template = template = md.slot[first].user_template;
5432 /* Auto select appropriate template. */
5433 memset (type, 0, sizeof (type));
5435 for (i = 0; i < n; ++i)
5437 if (md.slot[curr].label_fixups && i != 0)
5439 type[i] = md.slot[curr].idesc->type;
5440 curr = (curr + 1) % NUM_SLOTS;
5442 template = best_template[type[0]][type[1]][type[2]];
5445 /* initialize instructions with appropriate nops: */
5446 for (i = 0; i < 3; ++i)
5447 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
5451 /* now fill in slots with as many insns as possible: */
5453 idesc = md.slot[curr].idesc;
5454 end_of_insn_group = 0;
5455 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
5457 /* Set the slot number for prologue/body records now as those
5458 refer to the current point, not the point after the
5459 instruction has been issued: */
5460 /* Don't try to delete prologue/body records here, as that will cause
5461 them to also be deleted from the master list of unwind records. */
5462 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
5463 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
5464 || ptr->r.type == body)
5466 ptr->slot_number = (unsigned long) f + i;
5467 ptr->slot_frag = frag_now;
5470 if (idesc->flags & IA64_OPCODE_SLOT2)
5472 if (manual_bundling && i != 2)
5473 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5474 "`%s' must be last in bundle", idesc->name);
5478 if (idesc->flags & IA64_OPCODE_LAST)
5480 int required_slot, required_template;
5482 /* If we need a stop bit after an M slot, our only choice is
5483 template 5 (M;;MI). If we need a stop bit after a B
5484 slot, our only choice is to place it at the end of the
5485 bundle, because the only available templates are MIB,
5486 MBB, BBB, MMB, and MFB. We don't handle anything other
5487 than M and B slots because these are the only kind of
5488 instructions that can have the IA64_OPCODE_LAST bit set. */
5489 required_template = template;
5490 switch (idesc->type)
5494 required_template = 5;
5502 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5503 "Internal error: don't know how to force %s to end"
5504 "of instruction group", idesc->name);
5508 if (manual_bundling && i != required_slot)
5509 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5510 "`%s' must be last in instruction group",
5512 if (required_slot < i)
5513 /* Can't fit this instruction. */
5517 if (required_template != template)
5519 /* If we switch the template, we need to reset the NOPs
5520 after slot i. The slot-types of the instructions ahead
5521 of i never change, so we don't need to worry about
5522 changing NOPs in front of this slot. */
5523 for (j = i; j < 3; ++j)
5524 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
5526 template = required_template;
5528 if (curr != first && md.slot[curr].label_fixups)
5530 if (manual_bundling_on)
5531 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5532 "Label must be first in a bundle");
5533 /* This insn must go into the first slot of a bundle. */
5537 manual_bundling_on = md.slot[curr].manual_bundling_on;
5538 manual_bundling_off = md.slot[curr].manual_bundling_off;
5540 if (manual_bundling_on)
5543 manual_bundling = 1;
5545 break; /* need to start a new bundle */
5548 if (end_of_insn_group && md.num_slots_in_use >= 1)
5550 /* We need an instruction group boundary in the middle of a
5551 bundle. See if we can switch to another template with
5552 an appropriate boundary. */
5554 orig_template = template;
5555 if (i == 1 && (user_template == 4
5556 || (user_template < 0
5557 && (ia64_templ_desc[template].exec_unit[0]
5561 end_of_insn_group = 0;
5563 else if (i == 2 && (user_template == 0
5564 || (user_template < 0
5565 && (ia64_templ_desc[template].exec_unit[1]
5567 /* This test makes sure we don't switch the template if
5568 the next instruction is one that needs to be first in
5569 an instruction group. Since all those instructions are
5570 in the M group, there is no way such an instruction can
5571 fit in this bundle even if we switch the template. The
5572 reason we have to check for this is that otherwise we
5573 may end up generating "MI;;I M.." which has the deadly
5574 effect that the second M instruction is no longer the
5575 first in the bundle! --davidm 99/12/16 */
5576 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
5579 end_of_insn_group = 0;
5581 else if (curr != first)
5582 /* can't fit this insn */
5585 if (template != orig_template)
5586 /* if we switch the template, we need to reset the NOPs
5587 after slot i. The slot-types of the instructions ahead
5588 of i never change, so we don't need to worry about
5589 changing NOPs in front of this slot. */
5590 for (j = i; j < 3; ++j)
5591 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
5593 required_unit = ia64_templ_desc[template].exec_unit[i];
5595 /* resolve dynamic opcodes such as "break" and "nop": */
5596 if (idesc->type == IA64_TYPE_DYN)
5598 if ((strcmp (idesc->name, "nop") == 0)
5599 || (strcmp (idesc->name, "break") == 0))
5600 insn_unit = required_unit;
5601 else if (strcmp (idesc->name, "chk.s") == 0)
5603 insn_unit = IA64_UNIT_M;
5604 if (required_unit == IA64_UNIT_I)
5605 insn_unit = IA64_UNIT_I;
5608 as_fatal ("emit_one_bundle: unexpected dynamic op");
5610 sprintf (mnemonic, "%s.%c", idesc->name, "?imbf??"[insn_unit]);
5611 ia64_free_opcode (idesc);
5612 md.slot[curr].idesc = idesc = ia64_find_opcode (mnemonic);
5614 know (!idesc->next); /* no resolved dynamic ops have collisions */
5619 insn_type = idesc->type;
5620 insn_unit = IA64_UNIT_NIL;
5624 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
5625 insn_unit = required_unit;
5627 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
5628 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
5629 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
5630 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
5631 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
5636 if (insn_unit != required_unit)
5638 if (required_unit == IA64_UNIT_L
5639 && insn_unit == IA64_UNIT_I
5640 && !(idesc->flags & IA64_OPCODE_X_IN_MLX))
5642 /* we got ourselves an MLX template but the current
5643 instruction isn't an X-unit, or an I-unit instruction
5644 that can go into the X slot of an MLX template. Duh. */
5645 if (md.num_slots_in_use >= NUM_SLOTS)
5647 as_bad_where (md.slot[curr].src_file,
5648 md.slot[curr].src_line,
5649 "`%s' can't go in X slot of "
5650 "MLX template", idesc->name);
5651 /* drop this insn so we don't livelock: */
5652 --md.num_slots_in_use;
5656 continue; /* try next slot */
5662 addr = frag_now->fr_address + frag_now_fix () - 16 + i;
5663 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
5666 if (errata_nop_necessary_p (md.slot + curr, insn_unit))
5667 as_warn (_("Additional NOP may be necessary to work around Itanium processor A/B step errata"));
5669 build_insn (md.slot + curr, insn + i);
5671 /* Set slot counts for non prologue/body unwind records. */
5672 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
5673 if (ptr->r.type != prologue && ptr->r.type != prologue_gr
5674 && ptr->r.type != body)
5676 ptr->slot_number = (unsigned long) f + i;
5677 ptr->slot_frag = frag_now;
5679 md.slot[curr].unwind_record = NULL;
5681 if (required_unit == IA64_UNIT_L)
5684 /* skip one slot for long/X-unit instructions */
5687 --md.num_slots_in_use;
5689 /* now is a good time to fix up the labels for this insn: */
5690 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
5692 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
5693 symbol_set_frag (lfix->sym, frag_now);
5695 /* and fix up the tags also. */
5696 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
5698 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
5699 symbol_set_frag (lfix->sym, frag_now);
5702 for (j = 0; j < md.slot[curr].num_fixups; ++j)
5704 ifix = md.slot[curr].fixup + j;
5705 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
5706 &ifix->expr, ifix->is_pcrel, ifix->code);
5707 fix->tc_fix_data.opnd = ifix->opnd;
5708 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
5709 fix->fx_file = md.slot[curr].src_file;
5710 fix->fx_line = md.slot[curr].src_line;
5713 end_of_insn_group = md.slot[curr].end_of_insn_group;
5715 if (end_of_insn_group)
5717 md.group_idx = (md.group_idx + 1) % 3;
5718 memset (md.last_groups + md.group_idx, 0, sizeof md.last_groups[0]);
5722 ia64_free_opcode (md.slot[curr].idesc);
5723 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
5724 md.slot[curr].user_template = -1;
5726 if (manual_bundling_off)
5728 manual_bundling = 0;
5731 curr = (curr + 1) % NUM_SLOTS;
5732 idesc = md.slot[curr].idesc;
5734 if (manual_bundling)
5736 if (md.num_slots_in_use > 0)
5737 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5738 "`%s' does not fit into %s template",
5739 idesc->name, ia64_templ_desc[template].name);
5741 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5742 "Missing '}' at end of file");
5744 know (md.num_slots_in_use < NUM_SLOTS);
5746 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
5747 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
5749 number_to_chars_littleendian (f + 0, t0, 8);
5750 number_to_chars_littleendian (f + 8, t1, 8);
5752 unwind.next_slot_number = (unsigned long) f + 16;
5753 unwind.next_slot_frag = frag_now;
5757 md_parse_option (c, arg)
5763 /* Switches from the Intel assembler. */
5765 if (strcmp (arg, "ilp64") == 0
5766 || strcmp (arg, "lp64") == 0
5767 || strcmp (arg, "p64") == 0)
5769 md.flags |= EF_IA_64_ABI64;
5771 else if (strcmp (arg, "ilp32") == 0)
5773 md.flags &= ~EF_IA_64_ABI64;
5775 else if (strcmp (arg, "le") == 0)
5777 md.flags &= ~EF_IA_64_BE;
5779 else if (strcmp (arg, "be") == 0)
5781 md.flags |= EF_IA_64_BE;
5788 if (strcmp (arg, "so") == 0)
5790 /* Suppress signon message. */
5792 else if (strcmp (arg, "pi") == 0)
5794 /* Reject privileged instructions. FIXME */
5796 else if (strcmp (arg, "us") == 0)
5798 /* Allow union of signed and unsigned range. FIXME */
5800 else if (strcmp (arg, "close_fcalls") == 0)
5802 /* Do not resolve global function calls. */
5809 /* temp[="prefix"] Insert temporary labels into the object file
5810 symbol table prefixed by "prefix".
5811 Default prefix is ":temp:".
5816 /* ??? Conflicts with gas' listing option. */
5817 /* indirect=<tgt> Assume unannotated indirect branches behavior
5818 according to <tgt> --
5819 exit: branch out from the current context (default)
5820 labels: all labels in context may be branch targets
5825 /* -X conflicts with an ignored option, use -x instead */
5827 if (!arg || strcmp (arg, "explicit") == 0)
5829 /* set default mode to explicit */
5830 md.default_explicit_mode = 1;
5833 else if (strcmp (arg, "auto") == 0)
5835 md.default_explicit_mode = 0;
5837 else if (strcmp (arg, "debug") == 0)
5841 else if (strcmp (arg, "debugx") == 0)
5843 md.default_explicit_mode = 1;
5848 as_bad (_("Unrecognized option '-x%s'"), arg);
5853 /* nops Print nops statistics. */
5856 /* GNU specific switches for gcc. */
5857 case OPTION_MCONSTANT_GP:
5858 md.flags |= EF_IA_64_CONS_GP;
5861 case OPTION_MAUTO_PIC:
5862 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
5873 md_show_usage (stream)
5878 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
5879 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
5880 -x | -xexplicit turn on dependency violation checking (default)\n\
5881 -xauto automagically remove dependency violations\n\
5882 -xdebug debug dependency violation checker\n"),
5886 /* Return true if TYPE fits in TEMPL at SLOT. */
5889 match (int templ, int type, int slot)
5891 enum ia64_unit unit;
5894 unit = ia64_templ_desc[templ].exec_unit[slot];
5897 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
5899 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
5901 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
5902 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
5903 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
5904 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
5905 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
5906 default: result = 0; break;
5911 /* Add a bit of extra goodness if a nop of type F or B would fit
5912 in TEMPL at SLOT. */
5915 extra_goodness (int templ, int slot)
5917 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
5919 if (slot == 2 && match (templ, IA64_TYPE_B, slot))
5924 /* This function is called once, at assembler startup time. It sets
5925 up all the tables, etc. that the MD part of the assembler will need
5926 that can be determined before arguments are parsed. */
5930 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
5935 md.explicit_mode = md.default_explicit_mode;
5937 bfd_set_section_alignment (stdoutput, text_section, 4);
5939 target_big_endian = TARGET_BYTES_BIG_ENDIAN;
5940 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
5941 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
5942 &zero_address_frag);
5944 pseudo_func[FUNC_GP_RELATIVE].u.sym =
5945 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
5946 &zero_address_frag);
5948 pseudo_func[FUNC_LT_RELATIVE].u.sym =
5949 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
5950 &zero_address_frag);
5952 pseudo_func[FUNC_PC_RELATIVE].u.sym =
5953 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
5954 &zero_address_frag);
5956 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
5957 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
5958 &zero_address_frag);
5960 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
5961 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
5962 &zero_address_frag);
5964 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
5965 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
5966 &zero_address_frag);
5968 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
5969 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
5970 &zero_address_frag);
5972 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
5973 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
5974 &zero_address_frag);
5976 /* Compute the table of best templates. We compute goodness as a
5977 base 4 value, in which each match counts for 3, each F counts
5978 for 2, each B counts for 1. This should maximize the number of
5979 F and B nops in the chosen bundles, which is good because these
5980 pipelines are least likely to be overcommitted. */
5981 for (i = 0; i < IA64_NUM_TYPES; ++i)
5982 for (j = 0; j < IA64_NUM_TYPES; ++j)
5983 for (k = 0; k < IA64_NUM_TYPES; ++k)
5986 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
5989 if (match (t, i, 0))
5991 if (match (t, j, 1))
5993 if (match (t, k, 2))
5994 goodness = 3 + 3 + 3;
5996 goodness = 3 + 3 + extra_goodness (t, 2);
5998 else if (match (t, j, 2))
5999 goodness = 3 + 3 + extra_goodness (t, 1);
6003 goodness += extra_goodness (t, 1);
6004 goodness += extra_goodness (t, 2);
6007 else if (match (t, i, 1))
6009 if (match (t, j, 2))
6012 goodness = 3 + extra_goodness (t, 2);
6014 else if (match (t, i, 2))
6015 goodness = 3 + extra_goodness (t, 1);
6017 if (goodness > best)
6020 best_template[i][j][k] = t;
6025 for (i = 0; i < NUM_SLOTS; ++i)
6026 md.slot[i].user_template = -1;
6028 md.pseudo_hash = hash_new ();
6029 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
6031 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
6032 (void *) (pseudo_opcode + i));
6034 as_fatal ("ia64.md_begin: can't hash `%s': %s",
6035 pseudo_opcode[i].name, err);
6038 md.reg_hash = hash_new ();
6039 md.dynreg_hash = hash_new ();
6040 md.const_hash = hash_new ();
6041 md.entry_hash = hash_new ();
6043 /* general registers: */
6046 for (i = 0; i < total; ++i)
6048 sprintf (name, "r%d", i - REG_GR);
6049 md.regsym[i] = declare_register (name, i);
6052 /* floating point registers: */
6054 for (; i < total; ++i)
6056 sprintf (name, "f%d", i - REG_FR);
6057 md.regsym[i] = declare_register (name, i);
6060 /* application registers: */
6063 for (; i < total; ++i)
6065 sprintf (name, "ar%d", i - REG_AR);
6066 md.regsym[i] = declare_register (name, i);
6069 /* control registers: */
6072 for (; i < total; ++i)
6074 sprintf (name, "cr%d", i - REG_CR);
6075 md.regsym[i] = declare_register (name, i);
6078 /* predicate registers: */
6080 for (; i < total; ++i)
6082 sprintf (name, "p%d", i - REG_P);
6083 md.regsym[i] = declare_register (name, i);
6086 /* branch registers: */
6088 for (; i < total; ++i)
6090 sprintf (name, "b%d", i - REG_BR);
6091 md.regsym[i] = declare_register (name, i);
6094 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
6095 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
6096 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
6097 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
6098 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
6099 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
6100 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
6102 for (i = 0; i < NELEMS (indirect_reg); ++i)
6104 regnum = indirect_reg[i].regnum;
6105 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
6108 /* define synonyms for application registers: */
6109 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
6110 md.regsym[i] = declare_register (ar[i - REG_AR].name,
6111 REG_AR + ar[i - REG_AR].regnum);
6113 /* define synonyms for control registers: */
6114 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
6115 md.regsym[i] = declare_register (cr[i - REG_CR].name,
6116 REG_CR + cr[i - REG_CR].regnum);
6118 declare_register ("gp", REG_GR + 1);
6119 declare_register ("sp", REG_GR + 12);
6120 declare_register ("rp", REG_BR + 0);
6122 /* pseudo-registers used to specify unwind info: */
6123 declare_register ("psp", REG_PSP);
6125 declare_register_set ("ret", 4, REG_GR + 8);
6126 declare_register_set ("farg", 8, REG_FR + 8);
6127 declare_register_set ("fret", 8, REG_FR + 8);
6129 for (i = 0; i < NELEMS (const_bits); ++i)
6131 err = hash_insert (md.const_hash, const_bits[i].name,
6132 (PTR) (const_bits + i));
6134 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
6138 /* Set the architecture and machine depending on defaults and command line
6140 if (md.flags & EF_IA_64_ABI64)
6141 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
6143 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
6146 as_warn (_("Could not set architecture and machine"));
6148 md.mem_offset.hint = 0;
6151 md.entry_labels = NULL;
6154 /* Set the elf type to 64 bit ABI by default. Cannot do this in md_begin
6155 because that is called after md_parse_option which is where we do the
6156 dynamic changing of md.flags based on -mlp64 or -milp32. Also, set the
6157 default endianness. */
6160 ia64_init (argc, argv)
6164 md.flags = EF_IA_64_ABI64;
6165 if (TARGET_BYTES_BIG_ENDIAN)
6166 md.flags |= EF_IA_64_BE;
6169 /* Return a string for the target object file format. */
6172 ia64_target_format ()
6174 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
6176 if (md.flags & EF_IA_64_BE)
6178 if (md.flags & EF_IA_64_ABI64)
6179 return "elf64-ia64-big";
6181 return "elf32-ia64-big";
6185 if (md.flags & EF_IA_64_ABI64)
6186 return "elf64-ia64-little";
6188 return "elf32-ia64-little";
6192 return "unknown-format";
6196 ia64_end_of_source ()
6198 /* terminate insn group upon reaching end of file: */
6199 insn_group_break (1, 0, 0);
6201 /* emits slots we haven't written yet: */
6202 ia64_flush_insns ();
6204 bfd_set_private_flags (stdoutput, md.flags);
6206 md.mem_offset.hint = 0;
6212 if (md.qp.X_op == O_register)
6213 as_bad ("qualifying predicate not followed by instruction");
6214 md.qp.X_op = O_absent;
6216 if (ignore_input ())
6219 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
6221 if (md.detect_dv && !md.explicit_mode)
6222 as_warn (_("Explicit stops are ignored in auto mode"));
6224 insn_group_break (1, 0, 0);
6228 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
6230 static int defining_tag = 0;
6233 ia64_unrecognized_line (ch)
6239 expression (&md.qp);
6240 if (*input_line_pointer++ != ')')
6242 as_bad ("Expected ')'");
6245 if (md.qp.X_op != O_register)
6247 as_bad ("Qualifying predicate expected");
6250 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
6252 as_bad ("Predicate register expected");
6258 if (md.manual_bundling)
6259 as_warn ("Found '{' when manual bundling is already turned on");
6261 CURR_SLOT.manual_bundling_on = 1;
6262 md.manual_bundling = 1;
6264 /* Bundling is only acceptable in explicit mode
6265 or when in default automatic mode. */
6266 if (md.detect_dv && !md.explicit_mode)
6268 if (!md.mode_explicitly_set
6269 && !md.default_explicit_mode)
6272 as_warn (_("Found '{' after explicit switch to automatic mode"));
6277 if (!md.manual_bundling)
6278 as_warn ("Found '}' when manual bundling is off");
6280 PREV_SLOT.manual_bundling_off = 1;
6281 md.manual_bundling = 0;
6283 /* switch back to automatic mode, if applicable */
6286 && !md.mode_explicitly_set
6287 && !md.default_explicit_mode)
6290 /* Allow '{' to follow on the same line. We also allow ";;", but that
6291 happens automatically because ';' is an end of line marker. */
6293 if (input_line_pointer[0] == '{')
6295 input_line_pointer++;
6296 return ia64_unrecognized_line ('{');
6299 demand_empty_rest_of_line ();
6308 if (md.qp.X_op == O_register)
6310 as_bad ("Tag must come before qualifying predicate.");
6313 s = input_line_pointer;
6314 c = get_symbol_end ();
6317 /* Put ':' back for error messages' sake. */
6318 *input_line_pointer++ = ':';
6319 as_bad ("Expected ':'");
6325 /* Put ':' back for error messages' sake. */
6326 *input_line_pointer++ = ':';
6327 if (*input_line_pointer++ != ']')
6329 as_bad ("Expected ']'");
6334 as_bad ("Tag name expected");
6344 /* Not a valid line. */
6349 ia64_frob_label (sym)
6352 struct label_fix *fix;
6354 /* Tags need special handling since they are not bundle breaks like
6358 fix = obstack_alloc (¬es, sizeof (*fix));
6360 fix->next = CURR_SLOT.tag_fixups;
6361 CURR_SLOT.tag_fixups = fix;
6366 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6368 md.last_text_seg = now_seg;
6369 fix = obstack_alloc (¬es, sizeof (*fix));
6371 fix->next = CURR_SLOT.label_fixups;
6372 CURR_SLOT.label_fixups = fix;
6374 /* Keep track of how many code entry points we've seen. */
6375 if (md.path == md.maxpaths)
6378 md.entry_labels = (const char **)
6379 xrealloc ((void *) md.entry_labels,
6380 md.maxpaths * sizeof (char *));
6382 md.entry_labels[md.path++] = S_GET_NAME (sym);
6387 ia64_flush_pending_output ()
6389 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6391 /* ??? This causes many unnecessary stop bits to be emitted.
6392 Unfortunately, it isn't clear if it is safe to remove this. */
6393 insn_group_break (1, 0, 0);
6394 ia64_flush_insns ();
6398 /* Do ia64-specific expression optimization. All that's done here is
6399 to transform index expressions that are either due to the indexing
6400 of rotating registers or due to the indexing of indirect register
6403 ia64_optimize_expr (l, op, r)
6412 if (l->X_op == O_register && r->X_op == O_constant)
6414 num_regs = (l->X_add_number >> 16);
6415 if ((unsigned) r->X_add_number >= num_regs)
6418 as_bad ("No current frame");
6420 as_bad ("Index out of range 0..%u", num_regs - 1);
6421 r->X_add_number = 0;
6423 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
6426 else if (l->X_op == O_register && r->X_op == O_register)
6428 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
6429 || l->X_add_number == IND_MEM)
6431 as_bad ("Indirect register set name expected");
6432 l->X_add_number = IND_CPUID;
6435 l->X_op_symbol = md.regsym[l->X_add_number];
6436 l->X_add_number = r->X_add_number;
6444 ia64_parse_name (name, e)
6448 struct const_desc *cdesc;
6449 struct dynreg *dr = 0;
6450 unsigned int regnum;
6454 /* first see if NAME is a known register name: */
6455 sym = hash_find (md.reg_hash, name);
6458 e->X_op = O_register;
6459 e->X_add_number = S_GET_VALUE (sym);
6463 cdesc = hash_find (md.const_hash, name);
6466 e->X_op = O_constant;
6467 e->X_add_number = cdesc->value;
6471 /* check for inN, locN, or outN: */
6475 if (name[1] == 'n' && isdigit (name[2]))
6483 if (name[1] == 'o' && name[2] == 'c' && isdigit (name[3]))
6491 if (name[1] == 'u' && name[2] == 't' && isdigit (name[3]))
6504 /* The name is inN, locN, or outN; parse the register number. */
6505 regnum = strtoul (name, &end, 10);
6506 if (end > name && *end == '\0')
6508 if ((unsigned) regnum >= dr->num_regs)
6511 as_bad ("No current frame");
6513 as_bad ("Register number out of range 0..%u",
6517 e->X_op = O_register;
6518 e->X_add_number = dr->base + regnum;
6523 if ((dr = hash_find (md.dynreg_hash, name)))
6525 /* We've got ourselves the name of a rotating register set.
6526 Store the base register number in the low 16 bits of
6527 X_add_number and the size of the register set in the top 16
6529 e->X_op = O_register;
6530 e->X_add_number = dr->base | (dr->num_regs << 16);
6536 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
6539 ia64_canonicalize_symbol_name (name)
6542 size_t len = strlen (name);
6543 if (len > 1 && name[len - 1] == '#')
6544 name[len - 1] = '\0';
6549 is_conditional_branch (idesc)
6550 struct ia64_opcode *idesc;
6552 return (strncmp (idesc->name, "br", 2) == 0
6553 && (strcmp (idesc->name, "br") == 0
6554 || strncmp (idesc->name, "br.cond", 7) == 0
6555 || strncmp (idesc->name, "br.call", 7) == 0
6556 || strncmp (idesc->name, "br.ret", 6) == 0
6557 || strcmp (idesc->name, "brl") == 0
6558 || strncmp (idesc->name, "brl.cond", 7) == 0
6559 || strncmp (idesc->name, "brl.call", 7) == 0
6560 || strncmp (idesc->name, "brl.ret", 6) == 0));
6563 /* Return whether the given opcode is a taken branch. If there's any doubt,
6567 is_taken_branch (idesc)
6568 struct ia64_opcode *idesc;
6570 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
6571 || strncmp (idesc->name, "br.ia", 5) == 0);
6574 /* Return whether the given opcode is an interruption or rfi. If there's any
6575 doubt, returns zero. */
6578 is_interruption_or_rfi (idesc)
6579 struct ia64_opcode *idesc;
6581 if (strcmp (idesc->name, "rfi") == 0)
6586 /* Returns the index of the given dependency in the opcode's list of chks, or
6587 -1 if there is no dependency. */
6590 depends_on (depind, idesc)
6592 struct ia64_opcode *idesc;
6595 const struct ia64_opcode_dependency *dep = idesc->dependencies;
6596 for (i = 0; i < dep->nchks; i++)
6598 if (depind == DEP (dep->chks[i]))
6604 /* Determine a set of specific resources used for a particular resource
6605 class. Returns the number of specific resources identified For those
6606 cases which are not determinable statically, the resource returned is
6609 Meanings of value in 'NOTE':
6610 1) only read/write when the register number is explicitly encoded in the
6612 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
6613 accesses CFM when qualifying predicate is in the rotating region.
6614 3) general register value is used to specify an indirect register; not
6615 determinable statically.
6616 4) only read the given resource when bits 7:0 of the indirect index
6617 register value does not match the register number of the resource; not
6618 determinable statically.
6619 5) all rules are implementation specific.
6620 6) only when both the index specified by the reader and the index specified
6621 by the writer have the same value in bits 63:61; not determinable
6623 7) only access the specified resource when the corresponding mask bit is
6625 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
6626 only read when these insns reference FR2-31
6627 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
6628 written when these insns write FR32-127
6629 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
6631 11) The target predicates are written independently of PR[qp], but source
6632 registers are only read if PR[qp] is true. Since the state of PR[qp]
6633 cannot statically be determined, all source registers are marked used.
6634 12) This insn only reads the specified predicate register when that
6635 register is the PR[qp].
6636 13) This reference to ld-c only applies to teh GR whose value is loaded
6637 with data returned from memory, not the post-incremented address register.
6638 14) The RSE resource includes the implementation-specific RSE internal
6639 state resources. At least one (and possibly more) of these resources are
6640 read by each instruction listed in IC:rse-readers. At least one (and
6641 possibly more) of these resources are written by each insn listed in
6643 15+16) Represents reserved instructions, which the assembler does not
6646 Memory resources (i.e. locations in memory) are *not* marked or tracked by
6647 this code; there are no dependency violations based on memory access.
6650 #define MAX_SPECS 256
6655 specify_resource (dep, idesc, type, specs, note, path)
6656 const struct ia64_dependency *dep;
6657 struct ia64_opcode *idesc;
6658 int type; /* is this a DV chk or a DV reg? */
6659 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
6660 int note; /* resource note for this insn's usage */
6661 int path; /* which execution path to examine */
6668 if (dep->mode == IA64_DV_WAW
6669 || (dep->mode == IA64_DV_RAW && type == DV_REG)
6670 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
6673 /* template for any resources we identify */
6674 tmpl.dependency = dep;
6676 tmpl.insn_srlz = tmpl.data_srlz = 0;
6677 tmpl.qp_regno = CURR_SLOT.qp_regno;
6678 tmpl.link_to_qp_branch = 1;
6679 tmpl.mem_offset.hint = 0;
6682 tmpl.cmp_type = CMP_NONE;
6685 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
6686 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
6687 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
6689 /* we don't need to track these */
6690 if (dep->semantics == IA64_DVS_NONE)
6693 switch (dep->specifier)
6698 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
6700 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
6701 if (regno >= 0 && regno <= 7)
6703 specs[count] = tmpl;
6704 specs[count++].index = regno;
6710 for (i = 0; i < 8; i++)
6712 specs[count] = tmpl;
6713 specs[count++].index = i;
6722 case IA64_RS_AR_UNAT:
6723 /* This is a mov =AR or mov AR= instruction. */
6724 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
6726 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
6727 if (regno == AR_UNAT)
6729 specs[count++] = tmpl;
6734 /* This is a spill/fill, or other instruction that modifies the
6737 /* Unless we can determine the specific bits used, mark the whole
6738 thing; bits 8:3 of the memory address indicate the bit used in
6739 UNAT. The .mem.offset hint may be used to eliminate a small
6740 subset of conflicts. */
6741 specs[count] = tmpl;
6742 if (md.mem_offset.hint)
6745 fprintf (stderr, " Using hint for spill/fill\n");
6746 /* The index isn't actually used, just set it to something
6747 approximating the bit index. */
6748 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
6749 specs[count].mem_offset.hint = 1;
6750 specs[count].mem_offset.offset = md.mem_offset.offset;
6751 specs[count++].mem_offset.base = md.mem_offset.base;
6755 specs[count++].specific = 0;
6763 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
6765 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
6766 if ((regno >= 8 && regno <= 15)
6767 || (regno >= 20 && regno <= 23)
6768 || (regno >= 31 && regno <= 39)
6769 || (regno >= 41 && regno <= 47)
6770 || (regno >= 67 && regno <= 111))
6772 specs[count] = tmpl;
6773 specs[count++].index = regno;
6786 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
6788 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
6789 if ((regno >= 48 && regno <= 63)
6790 || (regno >= 112 && regno <= 127))
6792 specs[count] = tmpl;
6793 specs[count++].index = regno;
6799 for (i = 48; i < 64; i++)
6801 specs[count] = tmpl;
6802 specs[count++].index = i;
6804 for (i = 112; i < 128; i++)
6806 specs[count] = tmpl;
6807 specs[count++].index = i;
6825 for (i = 0; i < idesc->num_outputs; i++)
6826 if (idesc->operands[i] == IA64_OPND_B1
6827 || idesc->operands[i] == IA64_OPND_B2)
6829 specs[count] = tmpl;
6830 specs[count++].index =
6831 CURR_SLOT.opnd[i].X_add_number - REG_BR;
6836 for (i = idesc->num_outputs;i < NELEMS (idesc->operands); i++)
6837 if (idesc->operands[i] == IA64_OPND_B1
6838 || idesc->operands[i] == IA64_OPND_B2)
6840 specs[count] = tmpl;
6841 specs[count++].index =
6842 CURR_SLOT.opnd[i].X_add_number - REG_BR;
6848 case IA64_RS_CPUID: /* four or more registers */
6851 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
6853 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
6854 if (regno >= 0 && regno < NELEMS (gr_values)
6857 specs[count] = tmpl;
6858 specs[count++].index = gr_values[regno].value & 0xFF;
6862 specs[count] = tmpl;
6863 specs[count++].specific = 0;
6873 case IA64_RS_DBR: /* four or more registers */
6876 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
6878 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
6879 if (regno >= 0 && regno < NELEMS (gr_values)
6882 specs[count] = tmpl;
6883 specs[count++].index = gr_values[regno].value & 0xFF;
6887 specs[count] = tmpl;
6888 specs[count++].specific = 0;
6892 else if (note == 0 && !rsrc_write)
6894 specs[count] = tmpl;
6895 specs[count++].specific = 0;
6903 case IA64_RS_IBR: /* four or more registers */
6906 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
6908 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
6909 if (regno >= 0 && regno < NELEMS (gr_values)
6912 specs[count] = tmpl;
6913 specs[count++].index = gr_values[regno].value & 0xFF;
6917 specs[count] = tmpl;
6918 specs[count++].specific = 0;
6931 /* These are implementation specific. Force all references to
6932 conflict with all other references. */
6933 specs[count] = tmpl;
6934 specs[count++].specific = 0;
6942 case IA64_RS_PKR: /* 16 or more registers */
6943 if (note == 3 || note == 4)
6945 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
6947 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
6948 if (regno >= 0 && regno < NELEMS (gr_values)
6953 specs[count] = tmpl;
6954 specs[count++].index = gr_values[regno].value & 0xFF;
6957 for (i = 0; i < NELEMS (gr_values); i++)
6959 /* Uses all registers *except* the one in R3. */
6960 if (i != (gr_values[regno].value & 0xFF))
6962 specs[count] = tmpl;
6963 specs[count++].index = i;
6969 specs[count] = tmpl;
6970 specs[count++].specific = 0;
6977 specs[count] = tmpl;
6978 specs[count++].specific = 0;
6982 case IA64_RS_PMC: /* four or more registers */
6985 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
6986 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
6989 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
6991 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
6992 if (regno >= 0 && regno < NELEMS (gr_values)
6995 specs[count] = tmpl;
6996 specs[count++].index = gr_values[regno].value & 0xFF;
7000 specs[count] = tmpl;
7001 specs[count++].specific = 0;
7011 case IA64_RS_PMD: /* four or more registers */
7014 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
7016 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7017 if (regno >= 0 && regno < NELEMS (gr_values)
7020 specs[count] = tmpl;
7021 specs[count++].index = gr_values[regno].value & 0xFF;
7025 specs[count] = tmpl;
7026 specs[count++].specific = 0;
7036 case IA64_RS_RR: /* eight registers */
7039 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
7041 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7042 if (regno >= 0 && regno < NELEMS (gr_values)
7045 specs[count] = tmpl;
7046 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
7050 specs[count] = tmpl;
7051 specs[count++].specific = 0;
7055 else if (note == 0 && !rsrc_write)
7057 specs[count] = tmpl;
7058 specs[count++].specific = 0;
7066 case IA64_RS_CR_IRR:
7069 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
7070 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
7072 && idesc->operands[1] == IA64_OPND_CR3
7075 for (i = 0; i < 4; i++)
7077 specs[count] = tmpl;
7078 specs[count++].index = CR_IRR0 + i;
7084 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7085 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7087 && regno <= CR_IRR3)
7089 specs[count] = tmpl;
7090 specs[count++].index = regno;
7099 case IA64_RS_CR_LRR:
7106 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7107 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7108 && (regno == CR_LRR0 || regno == CR_LRR1))
7110 specs[count] = tmpl;
7111 specs[count++].index = regno;
7119 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
7121 specs[count] = tmpl;
7122 specs[count++].index =
7123 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7138 else if (rsrc_write)
7140 if (dep->specifier == IA64_RS_FRb
7141 && idesc->operands[0] == IA64_OPND_F1)
7143 specs[count] = tmpl;
7144 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
7149 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7151 if (idesc->operands[i] == IA64_OPND_F2
7152 || idesc->operands[i] == IA64_OPND_F3
7153 || idesc->operands[i] == IA64_OPND_F4)
7155 specs[count] = tmpl;
7156 specs[count++].index =
7157 CURR_SLOT.opnd[i].X_add_number - REG_FR;
7166 /* This reference applies only to the GR whose value is loaded with
7167 data returned from memory. */
7168 specs[count] = tmpl;
7169 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
7175 for (i = 0; i < idesc->num_outputs; i++)
7176 if (idesc->operands[i] == IA64_OPND_R1
7177 || idesc->operands[i] == IA64_OPND_R2
7178 || idesc->operands[i] == IA64_OPND_R3)
7180 specs[count] = tmpl;
7181 specs[count++].index =
7182 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7184 if (idesc->flags & IA64_OPCODE_POSTINC)
7185 for (i = 0; i < NELEMS (idesc->operands); i++)
7186 if (idesc->operands[i] == IA64_OPND_MR3)
7188 specs[count] = tmpl;
7189 specs[count++].index =
7190 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7195 /* Look for anything that reads a GR. */
7196 for (i = 0; i < NELEMS (idesc->operands); i++)
7198 if (idesc->operands[i] == IA64_OPND_MR3
7199 || idesc->operands[i] == IA64_OPND_CPUID_R3
7200 || idesc->operands[i] == IA64_OPND_DBR_R3
7201 || idesc->operands[i] == IA64_OPND_IBR_R3
7202 || idesc->operands[i] == IA64_OPND_MSR_R3
7203 || idesc->operands[i] == IA64_OPND_PKR_R3
7204 || idesc->operands[i] == IA64_OPND_PMC_R3
7205 || idesc->operands[i] == IA64_OPND_PMD_R3
7206 || idesc->operands[i] == IA64_OPND_RR_R3
7207 || ((i >= idesc->num_outputs)
7208 && (idesc->operands[i] == IA64_OPND_R1
7209 || idesc->operands[i] == IA64_OPND_R2
7210 || idesc->operands[i] == IA64_OPND_R3
7211 /* addl source register. */
7212 || idesc->operands[i] == IA64_OPND_R3_2)))
7214 specs[count] = tmpl;
7215 specs[count++].index =
7216 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7227 /* This is the same as IA64_RS_PRr, except that the register range is
7228 from 1 - 15, and there are no rotating register reads/writes here. */
7232 for (i = 1; i < 16; i++)
7234 specs[count] = tmpl;
7235 specs[count++].index = i;
7241 /* Mark only those registers indicated by the mask. */
7244 mask = CURR_SLOT.opnd[2].X_add_number;
7245 for (i = 1; i < 16; i++)
7246 if (mask & ((valueT) 1 << i))
7248 specs[count] = tmpl;
7249 specs[count++].index = i;
7257 else if (note == 11) /* note 11 implies note 1 as well */
7261 for (i = 0; i < idesc->num_outputs; i++)
7263 if (idesc->operands[i] == IA64_OPND_P1
7264 || idesc->operands[i] == IA64_OPND_P2)
7266 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7267 if (regno >= 1 && regno < 16)
7269 specs[count] = tmpl;
7270 specs[count++].index = regno;
7280 else if (note == 12)
7282 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7284 specs[count] = tmpl;
7285 specs[count++].index = CURR_SLOT.qp_regno;
7292 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7293 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7294 int or_andcm = strstr(idesc->name, "or.andcm") != NULL;
7295 int and_orcm = strstr(idesc->name, "and.orcm") != NULL;
7297 if ((idesc->operands[0] == IA64_OPND_P1
7298 || idesc->operands[0] == IA64_OPND_P2)
7299 && p1 >= 1 && p1 < 16)
7301 specs[count] = tmpl;
7302 specs[count].cmp_type =
7303 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7304 specs[count++].index = p1;
7306 if ((idesc->operands[1] == IA64_OPND_P1
7307 || idesc->operands[1] == IA64_OPND_P2)
7308 && p2 >= 1 && p2 < 16)
7310 specs[count] = tmpl;
7311 specs[count].cmp_type =
7312 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7313 specs[count++].index = p2;
7318 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7320 specs[count] = tmpl;
7321 specs[count++].index = CURR_SLOT.qp_regno;
7323 if (idesc->operands[1] == IA64_OPND_PR)
7325 for (i = 1; i < 16; i++)
7327 specs[count] = tmpl;
7328 specs[count++].index = i;
7339 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
7340 simplified cases of this. */
7344 for (i = 16; i < 63; i++)
7346 specs[count] = tmpl;
7347 specs[count++].index = i;
7353 /* Mark only those registers indicated by the mask. */
7355 && idesc->operands[0] == IA64_OPND_PR)
7357 mask = CURR_SLOT.opnd[2].X_add_number;
7358 if (mask & ((valueT) 1<<16))
7359 for (i = 16; i < 63; i++)
7361 specs[count] = tmpl;
7362 specs[count++].index = i;
7366 && idesc->operands[0] == IA64_OPND_PR_ROT)
7368 for (i = 16; i < 63; i++)
7370 specs[count] = tmpl;
7371 specs[count++].index = i;
7379 else if (note == 11) /* note 11 implies note 1 as well */
7383 for (i = 0; i < idesc->num_outputs; i++)
7385 if (idesc->operands[i] == IA64_OPND_P1
7386 || idesc->operands[i] == IA64_OPND_P2)
7388 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7389 if (regno >= 16 && regno < 63)
7391 specs[count] = tmpl;
7392 specs[count++].index = regno;
7402 else if (note == 12)
7404 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7406 specs[count] = tmpl;
7407 specs[count++].index = CURR_SLOT.qp_regno;
7414 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7415 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7416 int or_andcm = strstr(idesc->name, "or.andcm") != NULL;
7417 int and_orcm = strstr(idesc->name, "and.orcm") != NULL;
7419 if ((idesc->operands[0] == IA64_OPND_P1
7420 || idesc->operands[0] == IA64_OPND_P2)
7421 && p1 >= 16 && p1 < 63)
7423 specs[count] = tmpl;
7424 specs[count].cmp_type =
7425 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7426 specs[count++].index = p1;
7428 if ((idesc->operands[1] == IA64_OPND_P1
7429 || idesc->operands[1] == IA64_OPND_P2)
7430 && p2 >= 16 && p2 < 63)
7432 specs[count] = tmpl;
7433 specs[count].cmp_type =
7434 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7435 specs[count++].index = p2;
7440 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7442 specs[count] = tmpl;
7443 specs[count++].index = CURR_SLOT.qp_regno;
7445 if (idesc->operands[1] == IA64_OPND_PR)
7447 for (i = 16; i < 63; i++)
7449 specs[count] = tmpl;
7450 specs[count++].index = i;
7462 /* Verify that the instruction is using the PSR bit indicated in
7466 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
7468 if (dep->regindex < 6)
7470 specs[count++] = tmpl;
7473 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
7475 if (dep->regindex < 32
7476 || dep->regindex == 35
7477 || dep->regindex == 36
7478 || (!rsrc_write && dep->regindex == PSR_CPL))
7480 specs[count++] = tmpl;
7483 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
7485 if (dep->regindex < 32
7486 || dep->regindex == 35
7487 || dep->regindex == 36
7488 || (rsrc_write && dep->regindex == PSR_CPL))
7490 specs[count++] = tmpl;
7495 /* Several PSR bits have very specific dependencies. */
7496 switch (dep->regindex)
7499 specs[count++] = tmpl;
7504 specs[count++] = tmpl;
7508 /* Only certain CR accesses use PSR.ic */
7509 if (idesc->operands[0] == IA64_OPND_CR3
7510 || idesc->operands[1] == IA64_OPND_CR3)
7513 ((idesc->operands[0] == IA64_OPND_CR3)
7516 CURR_SLOT.opnd[index].X_add_number - REG_CR;
7531 specs[count++] = tmpl;
7540 specs[count++] = tmpl;
7544 /* Only some AR accesses use cpl */
7545 if (idesc->operands[0] == IA64_OPND_AR3
7546 || idesc->operands[1] == IA64_OPND_AR3)
7549 ((idesc->operands[0] == IA64_OPND_AR3)
7552 CURR_SLOT.opnd[index].X_add_number - REG_AR;
7559 && regno <= AR_K7))))
7561 specs[count++] = tmpl;
7566 specs[count++] = tmpl;
7576 if (idesc->operands[0] == IA64_OPND_IMMU24)
7578 mask = CURR_SLOT.opnd[0].X_add_number;
7584 if (mask & ((valueT) 1 << dep->regindex))
7586 specs[count++] = tmpl;
7591 int min = dep->regindex == PSR_DFL ? 2 : 32;
7592 int max = dep->regindex == PSR_DFL ? 31 : 127;
7593 /* dfh is read on FR32-127; dfl is read on FR2-31 */
7594 for (i = 0; i < NELEMS (idesc->operands); i++)
7596 if (idesc->operands[i] == IA64_OPND_F1
7597 || idesc->operands[i] == IA64_OPND_F2
7598 || idesc->operands[i] == IA64_OPND_F3
7599 || idesc->operands[i] == IA64_OPND_F4)
7601 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
7602 if (reg >= min && reg <= max)
7604 specs[count++] = tmpl;
7611 int min = dep->regindex == PSR_MFL ? 2 : 32;
7612 int max = dep->regindex == PSR_MFL ? 31 : 127;
7613 /* mfh is read on writes to FR32-127; mfl is read on writes to
7615 for (i = 0; i < idesc->num_outputs; i++)
7617 if (idesc->operands[i] == IA64_OPND_F1)
7619 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
7620 if (reg >= min && reg <= max)
7622 specs[count++] = tmpl;
7627 else if (note == 10)
7629 for (i = 0; i < NELEMS (idesc->operands); i++)
7631 if (idesc->operands[i] == IA64_OPND_R1
7632 || idesc->operands[i] == IA64_OPND_R2
7633 || idesc->operands[i] == IA64_OPND_R3)
7635 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
7636 if (regno >= 16 && regno <= 31)
7638 specs[count++] = tmpl;
7649 case IA64_RS_AR_FPSR:
7650 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7652 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7653 if (regno == AR_FPSR)
7655 specs[count++] = tmpl;
7660 specs[count++] = tmpl;
7665 /* Handle all AR[REG] resources */
7666 if (note == 0 || note == 1)
7668 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7669 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
7670 && regno == dep->regindex)
7672 specs[count++] = tmpl;
7674 /* other AR[REG] resources may be affected by AR accesses */
7675 else if (idesc->operands[0] == IA64_OPND_AR3)
7678 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
7679 switch (dep->regindex)
7685 if (regno == AR_BSPSTORE)
7687 specs[count++] = tmpl;
7691 (regno == AR_BSPSTORE
7692 || regno == AR_RNAT))
7694 specs[count++] = tmpl;
7699 else if (idesc->operands[1] == IA64_OPND_AR3)
7702 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
7703 switch (dep->regindex)
7708 if (regno == AR_BSPSTORE || regno == AR_RNAT)
7710 specs[count++] = tmpl;
7717 specs[count++] = tmpl;
7727 /* Handle all CR[REG] resources */
7728 if (note == 0 || note == 1)
7730 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
7732 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7733 if (regno == dep->regindex)
7735 specs[count++] = tmpl;
7737 else if (!rsrc_write)
7739 /* Reads from CR[IVR] affect other resources. */
7740 if (regno == CR_IVR)
7742 if ((dep->regindex >= CR_IRR0
7743 && dep->regindex <= CR_IRR3)
7744 || dep->regindex == CR_TPR)
7746 specs[count++] = tmpl;
7753 specs[count++] = tmpl;
7762 case IA64_RS_INSERVICE:
7763 /* look for write of EOI (67) or read of IVR (65) */
7764 if ((idesc->operands[0] == IA64_OPND_CR3
7765 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
7766 || (idesc->operands[1] == IA64_OPND_CR3
7767 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
7769 specs[count++] = tmpl;
7776 specs[count++] = tmpl;
7787 specs[count++] = tmpl;
7791 /* Check if any of the registers accessed are in the rotating region.
7792 mov to/from pr accesses CFM only when qp_regno is in the rotating
7794 for (i = 0; i < NELEMS (idesc->operands); i++)
7796 if (idesc->operands[i] == IA64_OPND_R1
7797 || idesc->operands[i] == IA64_OPND_R2
7798 || idesc->operands[i] == IA64_OPND_R3)
7800 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
7801 /* Assumes that md.rot.num_regs is always valid */
7802 if (md.rot.num_regs > 0
7804 && num < 31 + md.rot.num_regs)
7806 specs[count] = tmpl;
7807 specs[count++].specific = 0;
7810 else if (idesc->operands[i] == IA64_OPND_F1
7811 || idesc->operands[i] == IA64_OPND_F2
7812 || idesc->operands[i] == IA64_OPND_F3
7813 || idesc->operands[i] == IA64_OPND_F4)
7815 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
7818 specs[count] = tmpl;
7819 specs[count++].specific = 0;
7822 else if (idesc->operands[i] == IA64_OPND_P1
7823 || idesc->operands[i] == IA64_OPND_P2)
7825 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
7828 specs[count] = tmpl;
7829 specs[count++].specific = 0;
7833 if (CURR_SLOT.qp_regno > 15)
7835 specs[count] = tmpl;
7836 specs[count++].specific = 0;
7841 /* This is the same as IA64_RS_PRr, except simplified to account for
7842 the fact that there is only one register. */
7846 specs[count++] = tmpl;
7851 if (idesc->operands[2] == IA64_OPND_IMM17)
7852 mask = CURR_SLOT.opnd[2].X_add_number;
7853 if (mask & ((valueT) 1 << 63))
7854 specs[count++] = tmpl;
7856 else if (note == 11)
7858 if ((idesc->operands[0] == IA64_OPND_P1
7859 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
7860 || (idesc->operands[1] == IA64_OPND_P2
7861 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
7863 specs[count++] = tmpl;
7866 else if (note == 12)
7868 if (CURR_SLOT.qp_regno == 63)
7870 specs[count++] = tmpl;
7877 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7878 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7879 int or_andcm = strstr(idesc->name, "or.andcm") != NULL;
7880 int and_orcm = strstr(idesc->name, "and.orcm") != NULL;
7883 && (idesc->operands[0] == IA64_OPND_P1
7884 || idesc->operands[0] == IA64_OPND_P2))
7886 specs[count] = tmpl;
7887 specs[count++].cmp_type =
7888 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7891 && (idesc->operands[1] == IA64_OPND_P1
7892 || idesc->operands[1] == IA64_OPND_P2))
7894 specs[count] = tmpl;
7895 specs[count++].cmp_type =
7896 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7901 if (CURR_SLOT.qp_regno == 63)
7903 specs[count++] = tmpl;
7914 /* FIXME we can identify some individual RSE written resources, but RSE
7915 read resources have not yet been completely identified, so for now
7916 treat RSE as a single resource */
7917 if (strncmp (idesc->name, "mov", 3) == 0)
7921 if (idesc->operands[0] == IA64_OPND_AR3
7922 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
7924 specs[count] = tmpl;
7925 specs[count++].index = 0; /* IA64_RSE_BSPLOAD/RNATBITINDEX */
7930 if (idesc->operands[0] == IA64_OPND_AR3)
7932 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
7933 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
7935 specs[count++] = tmpl;
7938 else if (idesc->operands[1] == IA64_OPND_AR3)
7940 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
7941 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
7942 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
7944 specs[count++] = tmpl;
7951 specs[count++] = tmpl;
7956 /* FIXME -- do any of these need to be non-specific? */
7957 specs[count++] = tmpl;
7961 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
7968 /* Clear branch flags on marked resources. This breaks the link between the
7969 QP of the marking instruction and a subsequent branch on the same QP. */
7972 clear_qp_branch_flag (mask)
7976 for (i = 0; i < regdepslen; i++)
7978 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
7979 if ((bit & mask) != 0)
7981 regdeps[i].link_to_qp_branch = 0;
7986 /* Remove any mutexes which contain any of the PRs indicated in the mask.
7988 Any changes to a PR clears the mutex relations which include that PR. */
7991 clear_qp_mutex (mask)
7997 while (i < qp_mutexeslen)
7999 if ((qp_mutexes[i].prmask & mask) != 0)
8003 fprintf (stderr, " Clearing mutex relation");
8004 print_prmask (qp_mutexes[i].prmask);
8005 fprintf (stderr, "\n");
8007 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
8014 /* Clear implies relations which contain PRs in the given masks.
8015 P1_MASK indicates the source of the implies relation, while P2_MASK
8016 indicates the implied PR. */
8019 clear_qp_implies (p1_mask, p2_mask)
8026 while (i < qp_implieslen)
8028 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
8029 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
8032 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
8033 qp_implies[i].p1, qp_implies[i].p2);
8034 qp_implies[i] = qp_implies[--qp_implieslen];
8041 /* Add the PRs specified to the list of implied relations. */
8044 add_qp_imply (p1, p2)
8051 /* p0 is not meaningful here. */
8052 if (p1 == 0 || p2 == 0)
8058 /* If it exists already, ignore it. */
8059 for (i = 0; i < qp_implieslen; i++)
8061 if (qp_implies[i].p1 == p1
8062 && qp_implies[i].p2 == p2
8063 && qp_implies[i].path == md.path
8064 && !qp_implies[i].p2_branched)
8068 if (qp_implieslen == qp_impliestotlen)
8070 qp_impliestotlen += 20;
8071 qp_implies = (struct qp_imply *)
8072 xrealloc ((void *) qp_implies,
8073 qp_impliestotlen * sizeof (struct qp_imply));
8076 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
8077 qp_implies[qp_implieslen].p1 = p1;
8078 qp_implies[qp_implieslen].p2 = p2;
8079 qp_implies[qp_implieslen].path = md.path;
8080 qp_implies[qp_implieslen++].p2_branched = 0;
8082 /* Add in the implied transitive relations; for everything that p2 implies,
8083 make p1 imply that, too; for everything that implies p1, make it imply p2
8085 for (i = 0; i < qp_implieslen; i++)
8087 if (qp_implies[i].p1 == p2)
8088 add_qp_imply (p1, qp_implies[i].p2);
8089 if (qp_implies[i].p2 == p1)
8090 add_qp_imply (qp_implies[i].p1, p2);
8092 /* Add in mutex relations implied by this implies relation; for each mutex
8093 relation containing p2, duplicate it and replace p2 with p1. */
8094 bit = (valueT) 1 << p1;
8095 mask = (valueT) 1 << p2;
8096 for (i = 0; i < qp_mutexeslen; i++)
8098 if (qp_mutexes[i].prmask & mask)
8099 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
8103 /* Add the PRs specified in the mask to the mutex list; this means that only
8104 one of the PRs can be true at any time. PR0 should never be included in
8114 if (qp_mutexeslen == qp_mutexestotlen)
8116 qp_mutexestotlen += 20;
8117 qp_mutexes = (struct qpmutex *)
8118 xrealloc ((void *) qp_mutexes,
8119 qp_mutexestotlen * sizeof (struct qpmutex));
8123 fprintf (stderr, " Registering mutex on");
8124 print_prmask (mask);
8125 fprintf (stderr, "\n");
8127 qp_mutexes[qp_mutexeslen].path = md.path;
8128 qp_mutexes[qp_mutexeslen++].prmask = mask;
8132 clear_register_values ()
8136 fprintf (stderr, " Clearing register values\n");
8137 for (i = 1; i < NELEMS (gr_values); i++)
8138 gr_values[i].known = 0;
8141 /* Keep track of register values/changes which affect DV tracking.
8143 optimization note: should add a flag to classes of insns where otherwise we
8144 have to examine a group of strings to identify them. */
8147 note_register_values (idesc)
8148 struct ia64_opcode *idesc;
8150 valueT qp_changemask = 0;
8153 /* Invalidate values for registers being written to. */
8154 for (i = 0; i < idesc->num_outputs; i++)
8156 if (idesc->operands[i] == IA64_OPND_R1
8157 || idesc->operands[i] == IA64_OPND_R2
8158 || idesc->operands[i] == IA64_OPND_R3)
8160 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8161 if (regno > 0 && regno < NELEMS (gr_values))
8162 gr_values[regno].known = 0;
8164 else if (idesc->operands[i] == IA64_OPND_R3_2)
8166 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8167 if (regno > 0 && regno < 4)
8168 gr_values[regno].known = 0;
8170 else if (idesc->operands[i] == IA64_OPND_P1
8171 || idesc->operands[i] == IA64_OPND_P2)
8173 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8174 qp_changemask |= (valueT) 1 << regno;
8176 else if (idesc->operands[i] == IA64_OPND_PR)
8178 if (idesc->operands[2] & (valueT) 0x10000)
8179 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
8181 qp_changemask = idesc->operands[2];
8184 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
8186 if (idesc->operands[1] & ((valueT) 1 << 43))
8187 qp_changemask = ~(valueT) 0xFFFFFFFFFFF | idesc->operands[1];
8189 qp_changemask = idesc->operands[1];
8190 qp_changemask &= ~(valueT) 0xFFFF;
8195 /* Always clear qp branch flags on any PR change. */
8196 /* FIXME there may be exceptions for certain compares. */
8197 clear_qp_branch_flag (qp_changemask);
8199 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
8200 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
8202 qp_changemask |= ~(valueT) 0xFFFF;
8203 if (strcmp (idesc->name, "clrrrb.pr") != 0)
8205 for (i = 32; i < 32 + md.rot.num_regs; i++)
8206 gr_values[i].known = 0;
8208 clear_qp_mutex (qp_changemask);
8209 clear_qp_implies (qp_changemask, qp_changemask);
8211 /* After a call, all register values are undefined, except those marked
8213 else if (strncmp (idesc->name, "br.call", 6) == 0
8214 || strncmp (idesc->name, "brl.call", 7) == 0)
8216 /* FIXME keep GR values which are marked as "safe_across_calls" */
8217 clear_register_values ();
8218 clear_qp_mutex (~qp_safe_across_calls);
8219 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
8220 clear_qp_branch_flag (~qp_safe_across_calls);
8222 else if (is_interruption_or_rfi (idesc)
8223 || is_taken_branch (idesc))
8225 clear_register_values ();
8226 clear_qp_mutex (~(valueT) 0);
8227 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
8229 /* Look for mutex and implies relations. */
8230 else if ((idesc->operands[0] == IA64_OPND_P1
8231 || idesc->operands[0] == IA64_OPND_P2)
8232 && (idesc->operands[1] == IA64_OPND_P1
8233 || idesc->operands[1] == IA64_OPND_P2))
8235 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8236 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8237 valueT p1mask = (valueT) 1 << p1;
8238 valueT p2mask = (valueT) 1 << p2;
8240 /* If one of the PRs is PR0, we can't really do anything. */
8241 if (p1 == 0 || p2 == 0)
8244 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
8246 /* In general, clear mutexes and implies which include P1 or P2,
8247 with the following exceptions. */
8248 else if (strstr (idesc->name, ".or.andcm") != NULL)
8250 add_qp_mutex (p1mask | p2mask);
8251 clear_qp_implies (p2mask, p1mask);
8253 else if (strstr (idesc->name, ".and.orcm") != NULL)
8255 add_qp_mutex (p1mask | p2mask);
8256 clear_qp_implies (p1mask, p2mask);
8258 else if (strstr (idesc->name, ".and") != NULL)
8260 clear_qp_implies (0, p1mask | p2mask);
8262 else if (strstr (idesc->name, ".or") != NULL)
8264 clear_qp_mutex (p1mask | p2mask);
8265 clear_qp_implies (p1mask | p2mask, 0);
8269 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
8270 if (strstr (idesc->name, ".unc") != NULL)
8272 add_qp_mutex (p1mask | p2mask);
8273 if (CURR_SLOT.qp_regno != 0)
8275 add_qp_imply (CURR_SLOT.opnd[0].X_add_number - REG_P,
8276 CURR_SLOT.qp_regno);
8277 add_qp_imply (CURR_SLOT.opnd[1].X_add_number - REG_P,
8278 CURR_SLOT.qp_regno);
8281 else if (CURR_SLOT.qp_regno == 0)
8283 add_qp_mutex (p1mask | p2mask);
8287 clear_qp_mutex (p1mask | p2mask);
8291 /* Look for mov imm insns into GRs. */
8292 else if (idesc->operands[0] == IA64_OPND_R1
8293 && (idesc->operands[1] == IA64_OPND_IMM22
8294 || idesc->operands[1] == IA64_OPND_IMMU64)
8295 && (strcmp (idesc->name, "mov") == 0
8296 || strcmp (idesc->name, "movl") == 0))
8298 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8299 if (regno > 0 && regno < NELEMS (gr_values))
8301 gr_values[regno].known = 1;
8302 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
8303 gr_values[regno].path = md.path;
8305 fprintf (stderr, " Know gr%d = 0x%llx\n",
8306 regno, gr_values[regno].value);
8311 clear_qp_mutex (qp_changemask);
8312 clear_qp_implies (qp_changemask, qp_changemask);
8316 /* Return whether the given predicate registers are currently mutex. */
8319 qp_mutex (p1, p2, path)
8329 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
8330 for (i = 0; i < qp_mutexeslen; i++)
8332 if (qp_mutexes[i].path >= path
8333 && (qp_mutexes[i].prmask & mask) == mask)
8340 /* Return whether the given resource is in the given insn's list of chks
8341 Return 1 if the conflict is absolutely determined, 2 if it's a potential
8345 resources_match (rs, idesc, note, qp_regno, path)
8347 struct ia64_opcode *idesc;
8352 struct rsrc specs[MAX_SPECS];
8355 /* If the marked resource's qp_regno and the given qp_regno are mutex,
8356 we don't need to check. One exception is note 11, which indicates that
8357 target predicates are written regardless of PR[qp]. */
8358 if (qp_mutex (rs->qp_regno, qp_regno, path)
8362 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
8365 /* UNAT checking is a bit more specific than other resources */
8366 if (rs->dependency->specifier == IA64_RS_AR_UNAT
8367 && specs[count].mem_offset.hint
8368 && rs->mem_offset.hint)
8370 if (rs->mem_offset.base == specs[count].mem_offset.base)
8372 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
8373 ((specs[count].mem_offset.offset >> 3) & 0x3F))
8380 /* Skip apparent PR write conflicts where both writes are an AND or both
8381 writes are an OR. */
8382 if (rs->dependency->specifier == IA64_RS_PR
8383 || rs->dependency->specifier == IA64_RS_PRr
8384 || rs->dependency->specifier == IA64_RS_PR63)
8386 if (specs[count].cmp_type != CMP_NONE
8387 && specs[count].cmp_type == rs->cmp_type)
8390 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
8391 dv_mode[rs->dependency->mode],
8392 rs->dependency->specifier != IA64_RS_PR63 ?
8393 specs[count].index : 63);
8398 " %s on parallel compare conflict %s vs %s on PR%d\n",
8399 dv_mode[rs->dependency->mode],
8400 dv_cmp_type[rs->cmp_type],
8401 dv_cmp_type[specs[count].cmp_type],
8402 rs->dependency->specifier != IA64_RS_PR63 ?
8403 specs[count].index : 63);
8407 /* If either resource is not specific, conservatively assume a conflict
8409 if (!specs[count].specific || !rs->specific)
8411 else if (specs[count].index == rs->index)
8416 fprintf (stderr, " No %s conflicts\n", rs->dependency->name);
8422 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
8423 insert a stop to create the break. Update all resource dependencies
8424 appropriately. If QP_REGNO is non-zero, only apply the break to resources
8425 which use the same QP_REGNO and have the link_to_qp_branch flag set.
8426 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
8430 insn_group_break (insert_stop, qp_regno, save_current)
8437 if (insert_stop && md.num_slots_in_use > 0)
8438 PREV_SLOT.end_of_insn_group = 1;
8442 fprintf (stderr, " Insn group break%s",
8443 (insert_stop ? " (w/stop)" : ""));
8445 fprintf (stderr, " effective for QP=%d", qp_regno);
8446 fprintf (stderr, "\n");
8450 while (i < regdepslen)
8452 const struct ia64_dependency *dep = regdeps[i].dependency;
8455 && regdeps[i].qp_regno != qp_regno)
8462 && CURR_SLOT.src_file == regdeps[i].file
8463 && CURR_SLOT.src_line == regdeps[i].line)
8469 /* clear dependencies which are automatically cleared by a stop, or
8470 those that have reached the appropriate state of insn serialization */
8471 if (dep->semantics == IA64_DVS_IMPLIED
8472 || dep->semantics == IA64_DVS_IMPLIEDF
8473 || regdeps[i].insn_srlz == STATE_SRLZ)
8475 print_dependency ("Removing", i);
8476 regdeps[i] = regdeps[--regdepslen];
8480 if (dep->semantics == IA64_DVS_DATA
8481 || dep->semantics == IA64_DVS_INSTR
8482 || dep->semantics == IA64_DVS_SPECIFIC)
8484 if (regdeps[i].insn_srlz == STATE_NONE)
8485 regdeps[i].insn_srlz = STATE_STOP;
8486 if (regdeps[i].data_srlz == STATE_NONE)
8487 regdeps[i].data_srlz = STATE_STOP;
8494 /* Add the given resource usage spec to the list of active dependencies. */
8497 mark_resource (idesc, dep, spec, depind, path)
8498 struct ia64_opcode *idesc;
8499 const struct ia64_dependency *dep;
8504 if (regdepslen == regdepstotlen)
8506 regdepstotlen += 20;
8507 regdeps = (struct rsrc *)
8508 xrealloc ((void *) regdeps,
8509 regdepstotlen * sizeof(struct rsrc));
8512 regdeps[regdepslen] = *spec;
8513 regdeps[regdepslen].depind = depind;
8514 regdeps[regdepslen].path = path;
8515 regdeps[regdepslen].file = CURR_SLOT.src_file;
8516 regdeps[regdepslen].line = CURR_SLOT.src_line;
8518 print_dependency ("Adding", regdepslen);
8524 print_dependency (action, depind)
8530 fprintf (stderr, " %s %s '%s'",
8531 action, dv_mode[(regdeps[depind].dependency)->mode],
8532 (regdeps[depind].dependency)->name);
8533 if (regdeps[depind].specific && regdeps[depind].index != 0)
8534 fprintf (stderr, " (%d)", regdeps[depind].index);
8535 if (regdeps[depind].mem_offset.hint)
8536 fprintf (stderr, " 0x%llx+0x%llx",
8537 regdeps[depind].mem_offset.base,
8538 regdeps[depind].mem_offset.offset);
8539 fprintf (stderr, "\n");
8544 instruction_serialization ()
8548 fprintf (stderr, " Instruction serialization\n");
8549 for (i = 0; i < regdepslen; i++)
8550 if (regdeps[i].insn_srlz == STATE_STOP)
8551 regdeps[i].insn_srlz = STATE_SRLZ;
8555 data_serialization ()
8559 fprintf (stderr, " Data serialization\n");
8560 while (i < regdepslen)
8562 if (regdeps[i].data_srlz == STATE_STOP
8563 /* Note: as of 991210, all "other" dependencies are cleared by a
8564 data serialization. This might change with new tables */
8565 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
8567 print_dependency ("Removing", i);
8568 regdeps[i] = regdeps[--regdepslen];
8575 /* Insert stops and serializations as needed to avoid DVs. */
8578 remove_marked_resource (rs)
8581 switch (rs->dependency->semantics)
8583 case IA64_DVS_SPECIFIC:
8585 fprintf (stderr, "Implementation-specific, assume worst case...\n");
8586 /* ...fall through... */
8587 case IA64_DVS_INSTR:
8589 fprintf (stderr, "Inserting instr serialization\n");
8590 if (rs->insn_srlz < STATE_STOP)
8591 insn_group_break (1, 0, 0);
8592 if (rs->insn_srlz < STATE_SRLZ)
8594 int oldqp = CURR_SLOT.qp_regno;
8595 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
8596 /* Manually jam a srlz.i insn into the stream */
8597 CURR_SLOT.qp_regno = 0;
8598 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
8599 instruction_serialization ();
8600 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
8601 if (++md.num_slots_in_use >= NUM_SLOTS)
8603 CURR_SLOT.qp_regno = oldqp;
8604 CURR_SLOT.idesc = oldidesc;
8606 insn_group_break (1, 0, 0);
8608 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
8609 "other" types of DV are eliminated
8610 by a data serialization */
8613 fprintf (stderr, "Inserting data serialization\n");
8614 if (rs->data_srlz < STATE_STOP)
8615 insn_group_break (1, 0, 0);
8617 int oldqp = CURR_SLOT.qp_regno;
8618 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
8619 /* Manually jam a srlz.d insn into the stream */
8620 CURR_SLOT.qp_regno = 0;
8621 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
8622 data_serialization ();
8623 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
8624 if (++md.num_slots_in_use >= NUM_SLOTS)
8626 CURR_SLOT.qp_regno = oldqp;
8627 CURR_SLOT.idesc = oldidesc;
8630 case IA64_DVS_IMPLIED:
8631 case IA64_DVS_IMPLIEDF:
8633 fprintf (stderr, "Inserting stop\n");
8634 insn_group_break (1, 0, 0);
8641 /* Check the resources used by the given opcode against the current dependency
8644 The check is run once for each execution path encountered. In this case,
8645 a unique execution path is the sequence of instructions following a code
8646 entry point, e.g. the following has three execution paths, one starting
8647 at L0, one at L1, and one at L2.
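	L0:	nop.m 0
	L1:	nop.m 0
	L2:	nop.m 0
		nop.m 0

   (illustrative only; the particular instructions are immaterial).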
8656 check_dependencies (idesc)
8657 struct ia64_opcode *idesc;
8659 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
8663 /* Note that the number of marked resources may change within the
8664 loop if in auto mode. */
8666 while (i < regdepslen)
8668      struct rsrc *rs = &regdeps[i];
8669 const struct ia64_dependency *dep = rs->dependency;
8674 if (dep->semantics == IA64_DVS_NONE
8675 || (chkind = depends_on (rs->depind, idesc)) == -1)
8681 note = NOTE (opdeps->chks[chkind]);
8683 /* Check this resource against each execution path seen thus far. */
8684 for (path = 0; path <= md.path; path++)
8688 /* If the dependency wasn't on the path being checked, ignore it. */
8689 if (rs->path < path)
8692 /* If the QP for this insn implies a QP which has branched, don't
8693 bother checking. Ed. NOTE: I don't think this check is terribly
8694 useful; what's the point of generating code which will only be
8695 reached if its QP is zero?
8696 This code was specifically inserted to handle the following code,
8697 based on notes from Intel's DV checking code, where p1 implies p2.
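	     For instance (illustrative sketch only):

		(p2)	br.cond.spnt	elsewhere
		(p1)	ld8		r3 = [r4]

	     The load is only reached when p2 (and therefore p1) is zero, so
	     reporting a DV against the branch path would be a false positive.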
8703 if (CURR_SLOT.qp_regno != 0)
8707 for (implies = 0; implies < qp_implieslen; implies++)
8709 if (qp_implies[implies].path >= path
8710 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
8711 && qp_implies[implies].p2_branched)
8721 if ((matchtype = resources_match (rs, idesc, note,
8722 CURR_SLOT.qp_regno, path)) != 0)
8725 char pathmsg[256] = "";
8726 char indexmsg[256] = "";
8727 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
8730 sprintf (pathmsg, " when entry is at label '%s'",
8731 md.entry_labels[path - 1]);
8732 if (rs->specific && rs->index != 0)
8733 sprintf (indexmsg, ", specific resource number is %d",
8735 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
8737 (certain ? "violates" : "may violate"),
8738 dv_mode[dep->mode], dep->name,
8739 dv_sem[dep->semantics],
8742 if (md.explicit_mode)
8744 as_warn ("%s", msg);
8746 as_warn (_("Only the first path encountering the conflict "
8748 as_warn_where (rs->file, rs->line,
8749 _("This is the location of the "
8750 "conflicting usage"));
8751 /* Don't bother checking other paths, to avoid duplicating
8758 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
8760 remove_marked_resource (rs);
8762 /* since the set of dependencies has changed, start over */
8763 /* FIXME -- since we're removing dvs as we go, we
8764 probably don't really need to start over... */
8777 /* Register new dependencies based on the given opcode. */
8780 mark_resources (idesc)
8781 struct ia64_opcode *idesc;
8784 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
8785 int add_only_qp_reads = 0;
8787 /* A conditional branch only uses its resources if it is taken; if it is
8788 taken, we stop following that path. The other branch types effectively
8789 *always* write their resources. If it's not taken, register only QP
8791 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
8793 add_only_qp_reads = 1;
8797 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
8799 for (i = 0; i < opdeps->nregs; i++)
8801 const struct ia64_dependency *dep;
8802 struct rsrc specs[MAX_SPECS];
8807 dep = ia64_find_dependency (opdeps->regs[i]);
8808 note = NOTE (opdeps->regs[i]);
8810 if (add_only_qp_reads
8811 && !(dep->mode == IA64_DV_WAR
8812 && (dep->specifier == IA64_RS_PR
8813 || dep->specifier == IA64_RS_PRr
8814 || dep->specifier == IA64_RS_PR63)))
8817 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
8820 if (md.debug_dv && !count)
8821 fprintf (stderr, " No %s %s usage found (path %d)\n",
8822 dv_mode[dep->mode], dep->name, md.path);
8827 mark_resource (idesc, dep, &specs[count],
8828 DEP (opdeps->regs[i]), md.path);
8831 /* The execution path may affect register values, which may in turn
8832 affect which indirect-access resources are accessed. */
8833 switch (dep->specifier)
8845 for (path = 0; path < md.path; path++)
8847 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
8849 mark_resource (idesc, dep, &specs[count],
8850 DEP (opdeps->regs[i]), path);
8857 /* Remove dependencies when they no longer apply. */
8860 update_dependencies (idesc)
8861 struct ia64_opcode *idesc;
8865 if (strcmp (idesc->name, "srlz.i") == 0)
8867 instruction_serialization ();
8869 else if (strcmp (idesc->name, "srlz.d") == 0)
8871 data_serialization ();
8873 else if (is_interruption_or_rfi (idesc)
8874 || is_taken_branch (idesc))
8876 /* Although technically the taken branch doesn't clear dependencies
8877 which require a srlz.[id], we don't follow the branch; the next
8878 instruction is assumed to start with a clean slate. */
8882 else if (is_conditional_branch (idesc)
8883 && CURR_SLOT.qp_regno != 0)
8885 int is_call = strstr (idesc->name, ".call") != NULL;
8887 for (i = 0; i < qp_implieslen; i++)
8889 /* If the conditional branch's predicate is implied by the predicate
8890 in an existing dependency, remove that dependency. */
8891 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
8894 /* Note that this implied predicate takes a branch so that if
8895 a later insn generates a DV but its predicate implies this
8896 one, we can avoid the false DV warning. */
8897 qp_implies[i].p2_branched = 1;
8898 while (depind < regdepslen)
8900 if (regdeps[depind].qp_regno == qp_implies[i].p1)
8902 print_dependency ("Removing", depind);
8903 regdeps[depind] = regdeps[--regdepslen];
8910 /* Any marked resources which have this same predicate should be
8911 cleared, provided that the QP hasn't been modified between the
8912 marking instruction and the branch. */
8915 insn_group_break (0, CURR_SLOT.qp_regno, 1);
8920 while (i < regdepslen)
8922 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
8923 && regdeps[i].link_to_qp_branch
8924 && (regdeps[i].file != CURR_SLOT.src_file
8925 || regdeps[i].line != CURR_SLOT.src_line))
8927 /* Treat like a taken branch */
8928 print_dependency ("Removing", i);
8929 regdeps[i] = regdeps[--regdepslen];
8938 /* Examine the current instruction for dependency violations. */
8942 struct ia64_opcode *idesc;
8946 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
8947 idesc->name, CURR_SLOT.src_line,
8948 idesc->dependencies->nchks,
8949 idesc->dependencies->nregs);
8952 /* Look through the list of currently marked resources; if the current
8953 instruction has the dependency in its chks list which uses that resource,
8954 check against the specific resources used. */
8955 check_dependencies (idesc);
8957 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
8958 then add them to the list of marked resources. */
8959 mark_resources (idesc);
8961 /* There are several types of dependency semantics, and each has its own
8962 requirements for being cleared
8964 Instruction serialization (insns separated by interruption, rfi, or
8965 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
8967 Data serialization (instruction serialization, or writer + srlz.d +
8968 reader, where writer and srlz.d are in separate groups) clears
8969 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
8970 always be the case).
8972 Instruction group break (groups separated by stop, taken branch,
8973 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
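     For example (schematic only), a DVS_INSTR dependency is cleared by a
     sequence of the form

	<writer> ;;
	srlz.i ;;
	<reader>

     since the writer, the srlz.i, and the reader then fall in separate
     instruction groups.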
8975 update_dependencies (idesc);
8977 /* Sometimes, knowing a register value allows us to avoid giving a false DV
8978 warning. Keep track of as many as possible that are useful. */
8979 note_register_values (idesc);
8981  /* The memory offset hint applies only to the instruction just
      processed, so discard it now.  */
8982 md.mem_offset.hint = 0;
8987 /* Translate one line of assembly. Pseudo ops and labels do not show
8993 char *saved_input_line_pointer, *mnemonic;
8994 const struct pseudo_opcode *pdesc;
8995 struct ia64_opcode *idesc;
8996 unsigned char qp_regno;
9000 saved_input_line_pointer = input_line_pointer;
9001 input_line_pointer = str;
9003 /* extract the opcode (mnemonic): */
9005 mnemonic = input_line_pointer;
9006 ch = get_symbol_end ();
9007 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
9010 *input_line_pointer = ch;
9011 (*pdesc->handler) (pdesc->arg);
9015 /* Find the instruction descriptor matching the arguments. */
9017 idesc = ia64_find_opcode (mnemonic);
9018 *input_line_pointer = ch;
9021 as_bad ("Unknown opcode `%s'", mnemonic);
9025 idesc = parse_operands (idesc);
9029 /* Handle the dynamic ops we can handle now: */
9030 if (idesc->type == IA64_TYPE_DYN)
9032 if (strcmp (idesc->name, "add") == 0)
9034 if (CURR_SLOT.opnd[2].X_op == O_register
9035 && CURR_SLOT.opnd[2].X_add_number < 4)
9039 ia64_free_opcode (idesc);
9040 idesc = ia64_find_opcode (mnemonic);
9042 know (!idesc->next);
9045 else if (strcmp (idesc->name, "mov") == 0)
9047 enum ia64_opnd opnd1, opnd2;
9050 opnd1 = idesc->operands[0];
9051 opnd2 = idesc->operands[1];
9052 if (opnd1 == IA64_OPND_AR3)
9054 else if (opnd2 == IA64_OPND_AR3)
9058 if (CURR_SLOT.opnd[rop].X_op == O_register
9059 && ar_is_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
9063 ia64_free_opcode (idesc);
9064 idesc = ia64_find_opcode (mnemonic);
9065 while (idesc != NULL
9066 && (idesc->operands[0] != opnd1
9067 || idesc->operands[1] != opnd2))
9068 idesc = get_next_opcode (idesc);
9073 if (md.qp.X_op == O_register)
9075 qp_regno = md.qp.X_add_number - REG_P;
9076 md.qp.X_op = O_absent;
9079 flags = idesc->flags;
9081 if ((flags & IA64_OPCODE_FIRST) != 0)
9082 insn_group_break (1, 0, 0);
9084 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
9086 as_bad ("`%s' cannot be predicated", idesc->name);
9090 /* Build the instruction. */
9091 CURR_SLOT.qp_regno = qp_regno;
9092 CURR_SLOT.idesc = idesc;
9093 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
9094 dwarf2_where (&CURR_SLOT.debug_line);
9096 /* Add unwind entry, if there is one. */
9097 if (unwind.current_entry)
9099 CURR_SLOT.unwind_record = unwind.current_entry;
9100 unwind.current_entry = NULL;
9103 /* Check for dependency violations. */
9107 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9108 if (++md.num_slots_in_use >= NUM_SLOTS)
9111 if ((flags & IA64_OPCODE_LAST) != 0)
9112 insn_group_break (1, 0, 0);
9114 md.last_text_seg = now_seg;
9117 input_line_pointer = saved_input_line_pointer;
9120 /* Called when symbol NAME cannot be found in the symbol table.
9121 Should be used for dynamic valued symbols only. */
9124 md_undefined_symbol (name)
9130 /* Called for any expression that can not be recognized. When the
9131 function is called, `input_line_pointer' will point to the start of
9138 enum pseudo_type pseudo_type;
9143 switch (*input_line_pointer)
9146 /* Find what relocation pseudo-function we're dealing with. */
9148 ch = *++input_line_pointer;
9149 for (i = 0; i < NELEMS (pseudo_func); ++i)
9150 if (pseudo_func[i].name && pseudo_func[i].name[0] == ch)
9152 len = strlen (pseudo_func[i].name);
9153 if (strncmp (pseudo_func[i].name + 1,
9154 input_line_pointer + 1, len - 1) == 0
9155 && !is_part_of_name (input_line_pointer[len]))
9157 input_line_pointer += len;
9158 pseudo_type = pseudo_func[i].type;
9162 switch (pseudo_type)
9164 case PSEUDO_FUNC_RELOC:
9166 if (*input_line_pointer != '(')
9168 as_bad ("Expected '('");
9172 ++input_line_pointer;
9174 if (*input_line_pointer++ != ')')
9176 as_bad ("Missing ')'");
9179 if (e->X_op != O_symbol)
9181 if (e->X_op != O_pseudo_fixup)
9183 as_bad ("Not a symbolic expression");
9186 if (S_GET_VALUE (e->X_op_symbol) == FUNC_FPTR_RELATIVE
9187 && i == FUNC_LT_RELATIVE)
9188 i = FUNC_LT_FPTR_RELATIVE;
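	      /* The nested form @ltoff(@fptr(sym)) thus collapses into a
		 single LTOFF_FPTR relocation.  */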
9191 as_bad ("Illegal combination of relocation functions");
9195 /* Make sure gas doesn't get rid of local symbols that are used
9197 e->X_op = O_pseudo_fixup;
9198 e->X_op_symbol = pseudo_func[i].u.sym;
9201 case PSEUDO_FUNC_CONST:
9202 e->X_op = O_constant;
9203 e->X_add_number = pseudo_func[i].u.ival;
9206 case PSEUDO_FUNC_REG:
9207 e->X_op = O_register;
9208 e->X_add_number = pseudo_func[i].u.ival;
9212 name = input_line_pointer - 1;
9214 as_bad ("Unknown pseudo function `%s'", name);
9220 ++input_line_pointer;
9222 if (*input_line_pointer != ']')
9224 	  as_bad ("Closing bracket missing");
9229 if (e->X_op != O_register)
9230 as_bad ("Register expected as index");
9232 ++input_line_pointer;
9243 ignore_rest_of_line ();
9246 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
9247    a section symbol plus some offset.  For relocs involving @fptr(), we don't
9248    want such adjustments since we need to have the
9249 original symbol's name in the reloc. */
9251 ia64_fix_adjustable (fix)
9254 /* Prevent all adjustments to global symbols */
9255 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
9258 switch (fix->fx_r_type)
9260 case BFD_RELOC_IA64_FPTR64I:
9261 case BFD_RELOC_IA64_FPTR32MSB:
9262 case BFD_RELOC_IA64_FPTR32LSB:
9263 case BFD_RELOC_IA64_FPTR64MSB:
9264 case BFD_RELOC_IA64_FPTR64LSB:
9265 case BFD_RELOC_IA64_LTOFF_FPTR22:
9266 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9276 ia64_force_relocation (fix)
9279 switch (fix->fx_r_type)
9281 case BFD_RELOC_IA64_FPTR64I:
9282 case BFD_RELOC_IA64_FPTR32MSB:
9283 case BFD_RELOC_IA64_FPTR32LSB:
9284 case BFD_RELOC_IA64_FPTR64MSB:
9285 case BFD_RELOC_IA64_FPTR64LSB:
9287 case BFD_RELOC_IA64_LTOFF22:
9288 case BFD_RELOC_IA64_LTOFF64I:
9289 case BFD_RELOC_IA64_LTOFF_FPTR22:
9290 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9291 case BFD_RELOC_IA64_PLTOFF22:
9292 case BFD_RELOC_IA64_PLTOFF64I:
9293 case BFD_RELOC_IA64_PLTOFF64MSB:
9294 case BFD_RELOC_IA64_PLTOFF64LSB:
9303 /* Decide from what point a pc-relative relocation is relative to,
9304 relative to the pc-relative fixup. Er, relatively speaking. */
9306 ia64_pcrel_from_section (fix, sec)
9310 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
9312 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
9318 /* This is called whenever some data item (not an instruction) needs a
9319 fixup. We pick the right reloc code depending on the byteorder
9320 currently in effect. */
9322 ia64_cons_fix_new (f, where, nbytes, exp)
9328 bfd_reloc_code_real_type code;
9333       /* There are no relocs for 8 and 16 bit quantities, but we allow
9334 them here since they will work fine as long as the expression
9335 is fully defined at the end of the pass over the source file. */
9336 case 1: code = BFD_RELOC_8; break;
9337 case 2: code = BFD_RELOC_16; break;
9339 if (target_big_endian)
9340 code = BFD_RELOC_IA64_DIR32MSB;
9342 code = BFD_RELOC_IA64_DIR32LSB;
9346 if (target_big_endian)
9347 code = BFD_RELOC_IA64_DIR64MSB;
9349 code = BFD_RELOC_IA64_DIR64LSB;
9353 as_bad ("Unsupported fixup size %d", nbytes);
9354 ignore_rest_of_line ();
9357 if (exp->X_op == O_pseudo_fixup)
9360 exp->X_op = O_symbol;
9361 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
9363 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
9364 /* We need to store the byte order in effect in case we're going
9365      to fix an 8 or 16 bit relocation (for which there are no real
9366 relocs available). See md_apply_fix(). */
9367 fix->tc_fix_data.bigendian = target_big_endian;
9370 /* Return the actual relocation we wish to associate with the pseudo
9371 reloc described by SYM and R_TYPE. SYM should be one of the
9372 symbols in the pseudo_func array, or NULL. */
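/* For example, an @gprel() pseudo-function applied to a 22-bit immediate
   fixup (BFD_RELOC_IA64_IMM22) is mapped to BFD_RELOC_IA64_GPREL22 below.  */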
9374 static bfd_reloc_code_real_type
9375 ia64_gen_real_reloc_type (sym, r_type)
9377 bfd_reloc_code_real_type r_type;
9379 bfd_reloc_code_real_type new = 0;
9386 switch (S_GET_VALUE (sym))
9388 case FUNC_FPTR_RELATIVE:
9391 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
9392 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
9393 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
9394 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
9395 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
9400 case FUNC_GP_RELATIVE:
9403 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
9404 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
9405 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
9406 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
9407 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
9408 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
9413 case FUNC_LT_RELATIVE:
9416 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
9417 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
9422 case FUNC_PC_RELATIVE:
9425 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
9426 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
9427 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
9428 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
9429 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
9430 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
9435 case FUNC_PLT_RELATIVE:
9438 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
9439 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
9440 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
9441 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
9446 case FUNC_SEC_RELATIVE:
9449 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
9450 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
9451 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
9452 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
9457 case FUNC_SEG_RELATIVE:
9460 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
9461 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
9462 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
9463 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
9468 case FUNC_LTV_RELATIVE:
9471 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
9472 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
9473 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
9474 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
9479 case FUNC_LT_FPTR_RELATIVE:
9482 case BFD_RELOC_IA64_IMM22:
9483 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
9484 case BFD_RELOC_IA64_IMM64:
9485 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
9493 /* Hmmmm. Should this ever occur? */
9500 /* Here is where we generate the appropriate reloc for pseudo relocation
9503 ia64_validate_fix (fix)
9506 switch (fix->fx_r_type)
9508 case BFD_RELOC_IA64_FPTR64I:
9509 case BFD_RELOC_IA64_FPTR32MSB:
9510 case BFD_RELOC_IA64_FPTR64LSB:
9511 case BFD_RELOC_IA64_LTOFF_FPTR22:
9512 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9513 if (fix->fx_offset != 0)
9514 as_bad_where (fix->fx_file, fix->fx_line,
9515 "No addend allowed in @fptr() relocation");
9525 fix_insn (fix, odesc, value)
9527 const struct ia64_operand *odesc;
9530 bfd_vma insn[3], t0, t1, control_bits;
9535 slot = fix->fx_where & 0x3;
9536 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
9538 /* Bundles are always in little-endian byte order */
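  /* A bundle is 128 bits: a 5-bit template in the low-order bits followed by
     three 41-bit instruction slots.  Unpack the template into CONTROL_BITS
     and the slots into INSN[0..2] so that individual fields can be patched
     below.  */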
9539 t0 = bfd_getl64 (fixpos);
9540 t1 = bfd_getl64 (fixpos + 8);
9541 control_bits = t0 & 0x1f;
9542 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
9543 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
9544 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
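  /* Operands that span both slot 1 and slot 2 (the 64-bit movl immediate,
     the 62-bit long-format immediate, and the long-branch target) are
     patched by hand below; everything else goes through the operand's own
     insert routine.  */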
9547 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
9549 insn[1] = (value >> 22) & 0x1ffffffffffLL;
9550 insn[2] |= (((value & 0x7f) << 13)
9551 | (((value >> 7) & 0x1ff) << 27)
9552 | (((value >> 16) & 0x1f) << 22)
9553 | (((value >> 21) & 0x1) << 21)
9554 | (((value >> 63) & 0x1) << 36));
9556 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
9558 if (value & ~0x3fffffffffffffffULL)
9559 err = "integer operand out of range";
9560 insn[1] = (value >> 21) & 0x1ffffffffffLL;
9561 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
9563 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
9566 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
9567 insn[2] |= ((((value >> 59) & 0x1) << 36)
9568 | (((value >> 0) & 0xfffff) << 13));
9571 err = (*odesc->insert) (odesc, value, insn + slot);
9574 as_bad_where (fix->fx_file, fix->fx_line, err);
9576 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
9577 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
9578 number_to_chars_littleendian (fixpos + 0, t0, 8);
9579 number_to_chars_littleendian (fixpos + 8, t1, 8);
9582 /* Attempt to simplify or even eliminate a fixup. The return value is
9583 ignored; perhaps it was once meaningful, but now it is historical.
9584 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
9586 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
9589 md_apply_fix3 (fix, valuep, seg)
9595 valueT value = *valuep;
9598 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
9602 switch (fix->fx_r_type)
9604 case BFD_RELOC_IA64_DIR32MSB:
9605 fix->fx_r_type = BFD_RELOC_IA64_PCREL32MSB;
9609 case BFD_RELOC_IA64_DIR32LSB:
9610 fix->fx_r_type = BFD_RELOC_IA64_PCREL32LSB;
9614 case BFD_RELOC_IA64_DIR64MSB:
9615 fix->fx_r_type = BFD_RELOC_IA64_PCREL64MSB;
9619 case BFD_RELOC_IA64_DIR64LSB:
9620 fix->fx_r_type = BFD_RELOC_IA64_PCREL64LSB;
9630 switch (fix->fx_r_type)
9633 as_bad_where (fix->fx_file, fix->fx_line,
9634 "%s must have a constant value",
9635 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
9642 /* ??? This is a hack copied from tc-i386.c to make PCREL relocs
9643 work. There should be a better way to handle this. */
9645 fix->fx_offset += fix->fx_where + fix->fx_frag->fr_address;
9647 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
9649 if (fix->tc_fix_data.bigendian)
9650 number_to_chars_bigendian (fixpos, value, fix->fx_size);
9652 number_to_chars_littleendian (fixpos, value, fix->fx_size);
9658 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
9665 /* Generate the BFD reloc to be stuck in the object file from the
9666 fixup used internally in the assembler. */
9669 tc_gen_reloc (sec, fixp)
9675 reloc = xmalloc (sizeof (*reloc));
9676 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9677 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9678 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9679 reloc->addend = fixp->fx_offset;
9680 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
9684 as_bad_where (fixp->fx_file, fixp->fx_line,
9685 "Cannot represent %s relocation in object file",
9686 bfd_get_reloc_code_name (fixp->fx_r_type));
9691 /* Turn a string in input_line_pointer into a floating point constant
9692 of type TYPE, and store the appropriate bytes in *LIT. The number
9693 of LITTLENUMS emitted is stored in *SIZE. An error message is
9694 returned, or NULL on OK. */
9696 #define MAX_LITTLENUMS 5
9699 md_atof (type, lit, size)
9704 LITTLENUM_TYPE words[MAX_LITTLENUMS];
9705 LITTLENUM_TYPE *word;
9735 return "Bad call to MD_ATOF()";
9737 t = atof_ieee (input_line_pointer, type, words);
9739 input_line_pointer = t;
9740 *size = prec * sizeof (LITTLENUM_TYPE);
9742 for (word = words + prec - 1; prec--;)
9744 md_number_to_chars (lit, (long) (*word--), sizeof (LITTLENUM_TYPE));
9745 lit += sizeof (LITTLENUM_TYPE);
9750 /* Round up a section's size to the appropriate boundary. */
9752 md_section_align (seg, size)
9756 int align = bfd_get_section_alignment (stdoutput, seg);
9757 valueT mask = ((valueT) 1 << align) - 1;
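  /* ALIGN is a power-of-two exponent; e.g., with an alignment of 2**4 (16
     bytes), a size of 0x22 is rounded up to 0x30.  */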
9759 return (size + mask) & ~mask;
9762 /* Handle ia64 specific semantics of the align directive. */
9765 ia64_md_do_align (n, fill, len, max)
9771 /* Fill any pending bundle with nops. */
9772 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
9773 ia64_flush_insns ();
9775 /* When we align code in a text section, emit a bundle of 3 nops instead of
9776 zero bytes. We can only do this if a multiple of 16 bytes was requested.
9777 N is log base 2 of the requested alignment. */
9779 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE
9782 /* Use mfi bundle of nops with no stop bits. */
9783 static const unsigned char be_nop[]
9784 = { 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
9785 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c};
9786 static const unsigned char le_nop[]
9787 = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
9788 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
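      /* be_nop is the same bundle image as le_nop, stored most-significant
	 byte first.  */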
9790 /* Make sure we are on a 16-byte boundary, in case someone has been
9791 putting data into a text section. */
9792 frag_align (4, 0, 0);
9794 if (target_big_endian)
9795 frag_align_pattern (n, be_nop, 16, max);
9797 frag_align_pattern (n, le_nop, 16, max);