1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
4 Free Software Foundation, Inc.
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
34 /* Need TARGET_CPU. */
41 #include "opcode/arm.h"
45 #include "dwarf2dbg.h"
46 #include "dw2gencfi.h"
49 /* XXX Set this to 1 after the next binutils release. */
50 #define WARN_DEPRECATED 0
53 /* Must be at least the size of the largest unwind opcode (currently two). */
54 #define ARM_OPCODE_CHUNK_SIZE 8
56 /* This structure holds the unwinding state. */
61 symbolS * table_entry;
62 symbolS * personality_routine;
63 int personality_index;
64 /* The segment containing the function. */
67 /* Opcodes generated from this function. */
68 unsigned char * opcodes;
71 /* The number of bytes pushed to the stack. */
73 /* We don't add stack adjustment opcodes immediately so that we can merge
74 multiple adjustments. We can also omit the final adjustment
75 when using a frame pointer. */
76 offsetT pending_offset;
77 /* These two fields are set by both unwind_movsp and unwind_setfp. They
78 hold the reg+offset to use when restoring sp from a frame pointer. */
81 /* Nonzero if an unwind_setfp directive has been seen. */
83 /* Nonzero if the last opcode restores sp from fp_reg. */
84 unsigned sp_restored:1;
87 /* If bit N is set, an R_ARM_NONE relocation has already been output for
88 __aeabi_unwind_cpp_prN. This allows the dependency to be emitted only
89 once per section, to save unnecessary bloat. */
90 static unsigned int marked_pr_dependency = 0;
101 /* Types of processor to assemble for. */
103 #if defined __XSCALE__
104 #define CPU_DEFAULT ARM_ARCH_XSCALE
106 #if defined __thumb__
107 #define CPU_DEFAULT ARM_ARCH_V5T
114 # define FPU_DEFAULT FPU_ARCH_FPA
115 # elif defined (TE_NetBSD)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
119 /* Legacy a.out format. */
120 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
122 # elif defined (TE_VXWORKS)
123 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
125 /* For backwards compatibility, default to FPA. */
126 # define FPU_DEFAULT FPU_ARCH_FPA
128 #endif /* ifndef FPU_DEFAULT */
130 #define streq(a, b) (strcmp (a, b) == 0)
132 static arm_feature_set cpu_variant;
133 static arm_feature_set arm_arch_used;
134 static arm_feature_set thumb_arch_used;
136 /* Flags stored in private area of BFD structure. */
137 static int uses_apcs_26 = FALSE;
138 static int atpcs = FALSE;
139 static int support_interwork = FALSE;
140 static int uses_apcs_float = FALSE;
141 static int pic_code = FALSE;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
155 /* Constants for known architecture features. */
156 static const arm_feature_set fpu_default = FPU_DEFAULT;
157 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
158 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
159 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
160 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
161 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
162 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
163 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
164 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
167 static const arm_feature_set cpu_default = CPU_DEFAULT;
170 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
171 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
172 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
173 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
174 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
175 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
176 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
177 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
178 static const arm_feature_set arm_ext_v4t_5 =
179 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
180 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
181 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
182 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
183 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
184 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
185 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
186 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
189 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
190 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
191 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
192 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
193 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
195 static const arm_feature_set arm_arch_any = ARM_ANY;
196 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
197 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
198 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
200 static const arm_feature_set arm_cext_iwmmxt =
201 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
202 static const arm_feature_set arm_cext_xscale =
203 ARM_FEATURE (0, ARM_CEXT_XSCALE);
204 static const arm_feature_set arm_cext_maverick =
205 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
206 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
207 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
208 static const arm_feature_set fpu_vfp_ext_v1xd =
209 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
210 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
211 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
212 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
213 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
214 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
215 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
217 static int mfloat_abi_opt = -1;
218 /* Record user cpu selection for object attributes. */
219 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
220 /* Must be long enough to hold any of the names in arm_cpus. */
221 static char selected_cpu_name[16];
224 static int meabi_flags = EABI_DEFAULT;
226 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
231 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
232 symbolS * GOT_symbol;
235 /* 0: assemble for ARM,
236 1: assemble for Thumb,
237 2: assemble for Thumb even though target CPU does not support thumb
239 static int thumb_mode = 0;
241 /* If unified_syntax is true, we are processing the new unified
242 ARM/Thumb syntax. Important differences from the old ARM mode:
244 - Immediate operands do not require a # prefix.
245 - Conditional affixes always appear at the end of the
246 instruction. (For backward compatibility, those instructions
247 that formerly had them in the middle, continue to accept them
249 - The IT instruction may appear, and if it does is validated
250 against subsequent conditional affixes. It does not generate
253 Important differences from the old Thumb mode:
255 - Immediate operands do not require a # prefix.
256 - Most of the V6T2 instructions are only available in unified mode.
257 - The .N and .W suffixes are recognized and honored (it is an error
258 if they cannot be honored).
259 - All instructions set the flags if and only if they have an 's' affix.
260 - Conditional affixes may be used. They are validated against
261 preceding IT instructions. Unlike ARM mode, you cannot use a
262 conditional affix except in the scope of an IT instruction. */
264 static bfd_boolean unified_syntax = FALSE;
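/* Illustrative examples (these are not from the original sources): in the
   divided syntax one writes "ldreqb r0, [r1]" and "addeqs r0, r0, #1",
   whereas unified syntax expects "ldrbeq r0, [r1]" and "addseq r0, r0, 1"
   (the # being optional).  As noted above, the pre-UAL orderings are still
   accepted for backward compatibility.  */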
279 enum neon_el_type type;
283 #define NEON_MAX_TYPE_ELS 4
287 struct neon_type_el el[NEON_MAX_TYPE_ELS];
294 unsigned long instruction;
298 struct neon_type vectype;
299 /* Set to the opcode if the instruction needs relaxation.
300 Zero if the instruction is not relaxed. */
304 bfd_reloc_code_real_type type;
313 struct neon_type_el vectype;
314 unsigned present : 1; /* Operand present. */
315 unsigned isreg : 1; /* Operand was a register. */
316 unsigned immisreg : 1; /* .imm field is a second register. */
317 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
318 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
319 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
320 instructions. This allows us to disambiguate ARM <-> vector insns. */
321 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
322 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
323 unsigned hasreloc : 1; /* Operand has relocation suffix. */
324 unsigned writeback : 1; /* Operand has trailing ! */
325 unsigned preind : 1; /* Preindexed address. */
326 unsigned postind : 1; /* Postindexed address. */
327 unsigned negative : 1; /* Index register was negated. */
328 unsigned shifted : 1; /* Shift applied to operation. */
329 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
333 static struct arm_it inst;
335 #define NUM_FLOAT_VALS 8
337 const char * fp_const[] =
339 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
342 /* Number of littlenums required to hold an extended precision number. */
343 #define MAX_LITTLENUMS 6
345 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
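/* Illustrative note (an assumption, not from the original comment): the
   fp_const table above lists the eight floating point values that the FPA
   instruction set can encode directly as immediates, so for example an FPA
   move written as "mvfd f0, #1.0" can be assembled without needing a
   literal pool entry.  */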
355 #define CP_T_X 0x00008000
356 #define CP_T_Y 0x00400000
358 #define CONDS_BIT 0x00100000
359 #define LOAD_BIT 0x00100000
361 #define DOUBLE_LOAD_FLAG 0x00000001
365 const char * template;
369 #define COND_ALWAYS 0xE
373 const char *template;
377 struct asm_barrier_opt
379 const char *template;
383 /* The bit that distinguishes CPSR and SPSR. */
384 #define SPSR_BIT (1 << 22)
386 /* The individual PSR flag bits. */
387 #define PSR_c (1 << 16)
388 #define PSR_x (1 << 17)
389 #define PSR_s (1 << 18)
390 #define PSR_f (1 << 19)
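/* Illustrative note (assumption): these are the MSR field-mask bits,
   occupying bits 16-19 of the instruction.  "msr CPSR_fc, r0" sets
   PSR_f | PSR_c, and the SPSR form of the same instruction additionally
   sets SPSR_BIT.  */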
395 bfd_reloc_code_real_type reloc;
400 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
401 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
406 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
409 /* Bits for DEFINED field in neon_typed_alias. */
410 #define NTA_HASTYPE 1
411 #define NTA_HASINDEX 2
413 struct neon_typed_alias
415 unsigned char defined;
417 struct neon_type_el eltype;
420 /* ARM register categories. This includes coprocessor numbers and various
421 architecture extensions' registers. */
445 /* Structure for a hash table entry for a register.
446 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
447 information which states whether a vector type or index is specified (for a
448 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
452 unsigned char number;
454 unsigned char builtin;
455 struct neon_typed_alias *neon;
458 /* Diagnostics used when we don't get a register of the expected type. */
459 const char *const reg_expected_msgs[] =
461 N_("ARM register expected"),
462 N_("bad or missing co-processor number"),
463 N_("co-processor register expected"),
464 N_("FPA register expected"),
465 N_("VFP single precision register expected"),
466 N_("VFP/Neon double precision register expected"),
467 N_("Neon quad precision register expected"),
468 N_("Neon double or quad precision register expected"),
469 N_("VFP system register expected"),
470 N_("Maverick MVF register expected"),
471 N_("Maverick MVD register expected"),
472 N_("Maverick MVFX register expected"),
473 N_("Maverick MVDX register expected"),
474 N_("Maverick MVAX register expected"),
475 N_("Maverick DSPSC register expected"),
476 N_("iWMMXt data register expected"),
477 N_("iWMMXt control register expected"),
478 N_("iWMMXt scalar register expected"),
479 N_("XScale accumulator register expected"),
482 /* Some well known registers that we refer to directly elsewhere. */
487 /* ARM instructions take 4 bytes in the object file, Thumb instructions
493 /* Basic string to match. */
494 const char *template;
496 /* Parameters to instruction. */
497 unsigned char operands[8];
499 /* Conditional tag - see opcode_lookup. */
500 unsigned int tag : 4;
502 /* Basic instruction code. */
503 unsigned int avalue : 28;
505 /* Thumb-format instruction code. */
508 /* Which architecture variant provides this instruction. */
509 const arm_feature_set *avariant;
510 const arm_feature_set *tvariant;
512 /* Function to call to encode instruction in ARM format. */
513 void (* aencode) (void);
515 /* Function to call to encode instruction in Thumb format. */
516 void (* tencode) (void);
519 /* Defines for various bits that we will want to toggle. */
520 #define INST_IMMEDIATE 0x02000000
521 #define OFFSET_REG 0x02000000
522 #define HWOFFSET_IMM 0x00400000
523 #define SHIFT_BY_REG 0x00000010
524 #define PRE_INDEX 0x01000000
525 #define INDEX_UP 0x00800000
526 #define WRITE_BACK 0x00200000
527 #define LDM_TYPE_2_OR_3 0x00400000
529 #define LITERAL_MASK 0xf000f000
530 #define OPCODE_MASK 0xfe1fffff
531 #define V4_STR_BIT 0x00000020
533 #define DATA_OP_SHIFT 21
535 #define T2_OPCODE_MASK 0xfe1fffff
536 #define T2_DATA_OP_SHIFT 21
538 /* Codes to distinguish the arithmetic instructions. */
549 #define OPCODE_CMP 10
550 #define OPCODE_CMN 11
551 #define OPCODE_ORR 12
552 #define OPCODE_MOV 13
553 #define OPCODE_BIC 14
554 #define OPCODE_MVN 15
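/* Worked example (illustrative): a data-processing instruction carries one
   of the codes above in bits [24:21], i.e. shifted by DATA_OP_SHIFT.  Thus
   (OPCODE_MOV << DATA_OP_SHIFT) is 0x01a00000, and adding the always
   condition (COND_ALWAYS << 28) gives 0xe1a00000 -- the encoding of
   "mov r0, r0", the canonical ARM nop.  */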
556 #define T2_OPCODE_AND 0
557 #define T2_OPCODE_BIC 1
558 #define T2_OPCODE_ORR 2
559 #define T2_OPCODE_ORN 3
560 #define T2_OPCODE_EOR 4
561 #define T2_OPCODE_ADD 8
562 #define T2_OPCODE_ADC 10
563 #define T2_OPCODE_SBC 11
564 #define T2_OPCODE_SUB 13
565 #define T2_OPCODE_RSB 14
567 #define T_OPCODE_MUL 0x4340
568 #define T_OPCODE_TST 0x4200
569 #define T_OPCODE_CMN 0x42c0
570 #define T_OPCODE_NEG 0x4240
571 #define T_OPCODE_MVN 0x43c0
573 #define T_OPCODE_ADD_R3 0x1800
574 #define T_OPCODE_SUB_R3 0x1a00
575 #define T_OPCODE_ADD_HI 0x4400
576 #define T_OPCODE_ADD_ST 0xb000
577 #define T_OPCODE_SUB_ST 0xb080
578 #define T_OPCODE_ADD_SP 0xa800
579 #define T_OPCODE_ADD_PC 0xa000
580 #define T_OPCODE_ADD_I8 0x3000
581 #define T_OPCODE_SUB_I8 0x3800
582 #define T_OPCODE_ADD_I3 0x1c00
583 #define T_OPCODE_SUB_I3 0x1e00
585 #define T_OPCODE_ASR_R 0x4100
586 #define T_OPCODE_LSL_R 0x4080
587 #define T_OPCODE_LSR_R 0x40c0
588 #define T_OPCODE_ROR_R 0x41c0
589 #define T_OPCODE_ASR_I 0x1000
590 #define T_OPCODE_LSL_I 0x0000
591 #define T_OPCODE_LSR_I 0x0800
593 #define T_OPCODE_MOV_I8 0x2000
594 #define T_OPCODE_CMP_I8 0x2800
595 #define T_OPCODE_CMP_LR 0x4280
596 #define T_OPCODE_MOV_HR 0x4600
597 #define T_OPCODE_CMP_HR 0x4500
599 #define T_OPCODE_LDR_PC 0x4800
600 #define T_OPCODE_LDR_SP 0x9800
601 #define T_OPCODE_STR_SP 0x9000
602 #define T_OPCODE_LDR_IW 0x6800
603 #define T_OPCODE_STR_IW 0x6000
604 #define T_OPCODE_LDR_IH 0x8800
605 #define T_OPCODE_STR_IH 0x8000
606 #define T_OPCODE_LDR_IB 0x7800
607 #define T_OPCODE_STR_IB 0x7000
608 #define T_OPCODE_LDR_RW 0x5800
609 #define T_OPCODE_STR_RW 0x5000
610 #define T_OPCODE_LDR_RH 0x5a00
611 #define T_OPCODE_STR_RH 0x5200
612 #define T_OPCODE_LDR_RB 0x5c00
613 #define T_OPCODE_STR_RB 0x5400
615 #define T_OPCODE_PUSH 0xb400
616 #define T_OPCODE_POP 0xbc00
618 #define T_OPCODE_BRANCH 0xe000
620 #define THUMB_SIZE 2 /* Size of thumb instruction. */
621 #define THUMB_PP_PC_LR 0x0100
622 #define THUMB_LOAD_BIT 0x0800
623 #define THUMB2_LOAD_BIT 0x00100000
625 #define BAD_ARGS _("bad arguments to instruction")
626 #define BAD_PC _("r15 not allowed here")
627 #define BAD_COND _("instruction cannot be conditional")
628 #define BAD_OVERLAP _("registers may not be the same")
629 #define BAD_HIREG _("lo register required")
630 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
631 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
632 #define BAD_BRANCH _("branch must be last instruction in IT block")
633 #define BAD_NOT_IT _("instruction not allowed in IT block")
635 static struct hash_control *arm_ops_hsh;
636 static struct hash_control *arm_cond_hsh;
637 static struct hash_control *arm_shift_hsh;
638 static struct hash_control *arm_psr_hsh;
639 static struct hash_control *arm_v7m_psr_hsh;
640 static struct hash_control *arm_reg_hsh;
641 static struct hash_control *arm_reloc_hsh;
642 static struct hash_control *arm_barrier_opt_hsh;
644 /* Stuff needed to resolve the label ambiguity
654 symbolS * last_label_seen;
655 static int label_is_thumb_function_name = FALSE;
657 /* Literal pool structure. Held on a per-section
658 and per-sub-section basis. */
660 #define MAX_LITERAL_POOL_SIZE 1024
661 typedef struct literal_pool
663 expressionS literals [MAX_LITERAL_POOL_SIZE];
664 unsigned int next_free_entry;
669 struct literal_pool * next;
672 /* Pointer to a linked list of literal pools. */
673 literal_pool * list_of_pools = NULL;
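/* Illustrative example (not from the original sources): a pseudo-load such
   as "ldr r0, =0x12345678" cannot encode its constant as an immediate, so
   the value is appended to the pool for the current (sub)section and the
   instruction becomes a PC-relative load from it; a later ".ltorg" dumps
   the accumulated entries via s_ltorg below.  */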
675 /* State variables for IT block handling. */
676 static bfd_boolean current_it_mask = 0;
677 static int current_cc;
682 /* This array holds the chars that always start a comment. If the
683 pre-processor is disabled, these aren't very useful. */
684 const char comment_chars[] = "@";
686 /* This array holds the chars that only start a comment at the beginning of
687 a line. If the line seems to have the form '# 123 filename'
688 .line and .file directives will appear in the pre-processed output. */
689 /* Note that input_file.c hand checks for '#' at the beginning of the
690 first line of the input file. This is because the compiler outputs
691 #NO_APP at the beginning of its output. */
692 /* Also note that comments like this one will always work. */
693 const char line_comment_chars[] = "#";
695 const char line_separator_chars[] = ";";
697 /* Chars that can be used to separate mant
698 from exp in floating point numbers. */
699 const char EXP_CHARS[] = "eE";
701 /* Chars that mean this number is a floating point constant. */
705 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
707 /* Prefix characters that indicate the start of an immediate
709 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
711 /* Separator character handling. */
713 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
716 skip_past_char (char ** str, char c)
726 #define skip_past_comma(str) skip_past_char (str, ',')
728 /* Arithmetic expressions (possibly involving symbols). */
730 /* Return TRUE if anything in the expression is a bignum. */
733 walk_no_bignums (symbolS * sp)
735 if (symbol_get_value_expression (sp)->X_op == O_big)
738 if (symbol_get_value_expression (sp)->X_add_symbol)
740 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
741 || (symbol_get_value_expression (sp)->X_op_symbol
742 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
748 static int in_my_get_expression = 0;
750 /* Third argument to my_get_expression. */
751 #define GE_NO_PREFIX 0
752 #define GE_IMM_PREFIX 1
753 #define GE_OPT_PREFIX 2
754 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
755 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
756 #define GE_OPT_PREFIX_BIG 3
759 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
764 /* In unified syntax, all prefixes are optional. */
766 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
771 case GE_NO_PREFIX: break;
773 if (!is_immediate_prefix (**str))
775 inst.error = _("immediate expression requires a # prefix");
781 case GE_OPT_PREFIX_BIG:
782 if (is_immediate_prefix (**str))
788 memset (ep, 0, sizeof (expressionS));
790 save_in = input_line_pointer;
791 input_line_pointer = *str;
792 in_my_get_expression = 1;
793 seg = expression (ep);
794 in_my_get_expression = 0;
796 if (ep->X_op == O_illegal)
798 /* We found a bad expression in md_operand(). */
799 *str = input_line_pointer;
800 input_line_pointer = save_in;
801 if (inst.error == NULL)
802 inst.error = _("bad expression");
807 if (seg != absolute_section
808 && seg != text_section
809 && seg != data_section
810 && seg != bss_section
811 && seg != undefined_section)
813 inst.error = _("bad segment");
814 *str = input_line_pointer;
815 input_line_pointer = save_in;
820 /* Get rid of any bignums now, so that we don't generate an error for which
821 we can't establish a line number later on. Big numbers are never valid
822 in instructions, which is where this routine is always called. */
823 if (prefix_mode != GE_OPT_PREFIX_BIG
824 && (ep->X_op == O_big
826 && (walk_no_bignums (ep->X_add_symbol)
828 && walk_no_bignums (ep->X_op_symbol))))))
830 inst.error = _("invalid constant");
831 *str = input_line_pointer;
832 input_line_pointer = save_in;
836 *str = input_line_pointer;
837 input_line_pointer = save_in;
841 /* Turn a string in input_line_pointer into a floating point constant
842 of type TYPE, and store the appropriate bytes in *LITP. The number
843 of LITTLENUMS emitted is stored in *SIZEP. An error message is
844 returned, or NULL on OK.
846 Note that fp constants aren't represented in the normal way on the ARM.
847 In big endian mode, things are as expected. However, in little endian
848 mode fp constants are big-endian word-wise, and little-endian byte-wise
849 within the words. For example, (double) 1.1 in big endian mode is
850 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
851 the byte sequence 99 99 f1 3f 9a 99 99 99.
853 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
856 md_atof (int type, char * litP, int * sizeP)
859 LITTLENUM_TYPE words[MAX_LITTLENUMS];
891 return _("bad call to MD_ATOF()");
894 t = atof_ieee (input_line_pointer, type, words);
896 input_line_pointer = t;
899 if (target_big_endian)
901 for (i = 0; i < prec; i++)
903 md_number_to_chars (litP, (valueT) words[i], 2);
909 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
910 for (i = prec - 1; i >= 0; i--)
912 md_number_to_chars (litP, (valueT) words[i], 2);
916 /* For a 4 byte float the order of elements in `words' is 1 0.
917 For an 8 byte float the order is 1 0 3 2. */
918 for (i = 0; i < prec; i += 2)
920 md_number_to_chars (litP, (valueT) words[i + 1], 2);
921 md_number_to_chars (litP + 2, (valueT) words[i], 2);
929 /* We handle all bad expressions here, so that we can report the faulty
930 instruction in the error message. */
932 md_operand (expressionS * expr)
934 if (in_my_get_expression)
935 expr->X_op = O_illegal;
938 /* Immediate values. */
940 /* Generic immediate-value read function for use in directives.
941 Accepts anything that 'expression' can fold to a constant.
942 *val receives the number. */
945 immediate_for_directive (int *val)
948 exp.X_op = O_illegal;
950 if (is_immediate_prefix (*input_line_pointer))
952 input_line_pointer++;
956 if (exp.X_op != O_constant)
958 as_bad (_("expected #constant"));
959 ignore_rest_of_line ();
962 *val = exp.X_add_number;
967 /* Register parsing. */
969 /* Generic register parser. CCP points to what should be the
970 beginning of a register name. If it is indeed a valid register
971 name, advance CCP over it and return the reg_entry structure;
972 otherwise return NULL. Does not issue diagnostics. */
974 static struct reg_entry *
975 arm_reg_parse_multi (char **ccp)
979 struct reg_entry *reg;
981 #ifdef REGISTER_PREFIX
982 if (*start != REGISTER_PREFIX)
986 #ifdef OPTIONAL_REGISTER_PREFIX
987 if (*start == OPTIONAL_REGISTER_PREFIX)
992 if (!ISALPHA (*p) || !is_name_beginner (*p))
997 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
999 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1009 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1010 enum arm_reg_type type)
1012 /* Alternative syntaxes are accepted for a few register classes. */
1019 /* Generic coprocessor register names are allowed for these. */
1020 if (reg && reg->type == REG_TYPE_CN)
1025 /* For backward compatibility, a bare number is valid here. */
1027 unsigned long processor = strtoul (start, ccp, 10);
1028 if (*ccp != start && processor <= 15)
1032 case REG_TYPE_MMXWC:
1033 /* WC includes WCG. ??? I'm not sure this is true for all
1034 instructions that take WC registers. */
1035 if (reg && reg->type == REG_TYPE_MMXWCG)
1046 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1047 return value is the register number or FAIL. */
1050 arm_reg_parse (char **ccp, enum arm_reg_type type)
1053 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1056 /* Do not allow a scalar (reg+index) to parse as a register. */
1057 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1060 if (reg && reg->type == type)
1063 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1070 /* Parse a Neon type specifier. *STR should point at the leading '.'
1071 character. Does no verification at this stage that the type fits the opcode
1078 Can all be legally parsed by this function.
1080 Fills in neon_type struct pointer with parsed information, and updates STR
1081 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1082 type, FAIL if not. */
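/* For example (illustrative): ".i32" parses as a single 32-bit integer
   element, ".f" is shorthand for ".f32", and a two-element specifier such
   as ".s32.f32" (as written on VCVT, for instance) fills in two entries of
   the el[] array.  */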
1085 parse_neon_type (struct neon_type *type, char **str)
1092 while (type->elems < NEON_MAX_TYPE_ELS)
1094 enum neon_el_type thistype = NT_untyped;
1095 unsigned thissize = -1u;
1102 /* Just a size without an explicit type. */
1106 switch (TOLOWER (*ptr))
1108 case 'i': thistype = NT_integer; break;
1109 case 'f': thistype = NT_float; break;
1110 case 'p': thistype = NT_poly; break;
1111 case 's': thistype = NT_signed; break;
1112 case 'u': thistype = NT_unsigned; break;
1114 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1120 /* .f is an abbreviation for .f32. */
1121 if (thistype == NT_float && !ISDIGIT (*ptr))
1126 thissize = strtoul (ptr, &ptr, 10);
1128 if (thissize != 8 && thissize != 16 && thissize != 32
1131 as_bad (_("bad size %d in type specifier"), thissize);
1138 type->el[type->elems].type = thistype;
1139 type->el[type->elems].size = thissize;
1144 /* Empty/missing type is not a successful parse. */
1145 if (type->elems == 0)
1153 /* Errors may be set multiple times during parsing or bit encoding
1154 (particularly in the Neon bits), but usually the earliest error which is set
1155 will be the most meaningful. Avoid overwriting it with later (cascading)
1156 errors by calling this function. */
1159 first_error (const char *err)
1165 /* Parse a single type, e.g. ".s32", leading period included. */
1167 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1170 struct neon_type optype;
1174 if (parse_neon_type (&optype, &str) == SUCCESS)
1176 if (optype.elems == 1)
1177 *vectype = optype.el[0];
1180 first_error (_("only one type should be specified for operand"));
1186 first_error (_("vector type expected"));
1198 /* Special meanings for indices (which have a range of 0-7), which will fit into
1201 #define NEON_ALL_LANES 15
1202 #define NEON_INTERLEAVE_LANES 14
1204 /* Parse either a register or a scalar, with an optional type. Return the
1205 register number, and optionally fill in the actual type of the register
1206 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1207 type/index information in *TYPEINFO. */
1210 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1211 enum arm_reg_type *rtype,
1212 struct neon_typed_alias *typeinfo)
1215 struct reg_entry *reg = arm_reg_parse_multi (&str);
1216 struct neon_typed_alias atype;
1217 struct neon_type_el parsetype;
1221 atype.eltype.type = NT_invtype;
1222 atype.eltype.size = -1;
1224 /* Try alternate syntax for some types of register. Note these are mutually
1225 exclusive with the Neon syntax extensions. */
1228 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1236 /* Undo polymorphism for Neon D and Q registers. */
1237 if (type == REG_TYPE_NDQ
1238 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1241 if (type != reg->type)
1247 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1249 if ((atype.defined & NTA_HASTYPE) != 0)
1251 first_error (_("can't redefine type for operand"));
1254 atype.defined |= NTA_HASTYPE;
1255 atype.eltype = parsetype;
1258 if (skip_past_char (&str, '[') == SUCCESS)
1260 if (type != REG_TYPE_VFD)
1262 first_error (_("only D registers may be indexed"));
1266 if ((atype.defined & NTA_HASINDEX) != 0)
1268 first_error (_("can't change index for operand"));
1272 atype.defined |= NTA_HASINDEX;
1274 if (skip_past_char (&str, ']') == SUCCESS)
1275 atype.index = NEON_ALL_LANES;
1280 my_get_expression (&exp, &str, GE_NO_PREFIX);
1282 if (exp.X_op != O_constant)
1284 first_error (_("constant expression required"));
1288 if (skip_past_char (&str, ']') == FAIL)
1291 atype.index = exp.X_add_number;
1306 /* Like arm_reg_parse, but allow the following extra features:
1307 - If RTYPE is non-zero, return the (possibly restricted) type of the
1308 register (e.g. Neon double or quad reg when either has been requested).
1309 - If this is a Neon vector type with additional type information, fill
1310 in the struct pointed to by VECTYPE (if non-NULL).
1311 This function will fault on encountering a scalar.
1315 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1316 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1318 struct neon_typed_alias atype;
1320 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1325 /* Do not allow a scalar (reg+index) to parse as a register. */
1326 if ((atype.defined & NTA_HASINDEX) != 0)
1328 first_error (_("register operand expected, but got scalar"));
1333 *vectype = atype.eltype;
1340 #define NEON_SCALAR_REG(X) ((X) >> 4)
1341 #define NEON_SCALAR_INDEX(X) ((X) & 15)
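/* Worked example (illustrative): the scalar "d3[1]" is returned by
   parse_scalar below as 3 * 16 + 1 == 0x31, from which NEON_SCALAR_REG
   recovers 3 and NEON_SCALAR_INDEX recovers 1.  */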
1343 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1344 have enough information to be able to do a good job bounds-checking. So, we
1345 just do easy checks here, and do further checks later. */
1348 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1352 struct neon_typed_alias atype;
1354 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1356 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1359 if (atype.index == NEON_ALL_LANES)
1361 first_error (_("scalar must have an index"));
1364 else if (atype.index >= 64 / elsize)
1366 first_error (_("scalar index out of range"));
1371 *type = atype.eltype;
1375 return reg * 16 + atype.index;
1378 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
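/* For instance (illustrative): the list "{r0-r3, lr}" yields the mask
   0x400f, with bit N standing for register rN.  */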
1380 parse_reg_list (char ** strp)
1382 char * str = * strp;
1386 /* We come back here if we get ranges concatenated by '+' or '|'. */
1401 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1403 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1413 first_error (_("bad range in register list"));
1417 for (i = cur_reg + 1; i < reg; i++)
1419 if (range & (1 << i))
1421 (_("Warning: duplicated register (r%d) in register list"),
1429 if (range & (1 << reg))
1430 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1432 else if (reg <= cur_reg)
1433 as_tsktsk (_("Warning: register range not in ascending order"));
1438 while (skip_past_comma (&str) != FAIL
1439 || (in_range = 1, *str++ == '-'));
1444 first_error (_("missing `}'"));
1452 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1455 if (expr.X_op == O_constant)
1457 if (expr.X_add_number
1458 != (expr.X_add_number & 0x0000ffff))
1460 inst.error = _("invalid register mask");
1464 if ((range & expr.X_add_number) != 0)
1466 int regno = range & expr.X_add_number;
1469 regno = (1 << regno) - 1;
1471 (_("Warning: duplicated register (r%d) in register list"),
1475 range |= expr.X_add_number;
1479 if (inst.reloc.type != 0)
1481 inst.error = _("expression too complex");
1485 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1486 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1487 inst.reloc.pc_rel = 0;
1491 if (*str == '|' || *str == '+')
1497 while (another_range);
1503 /* Types of registers in a list. */
1512 /* Parse a VFP register list. If the string is invalid return FAIL.
1513 Otherwise return the number of registers, and set PBASE to the first
1514 register. Parses registers of type ETYPE.
1515 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1516 - Q registers can be used to specify pairs of D registers
1517 - { } can be omitted from around a singleton register list
1518 FIXME: This is not implemented, as it would require backtracking in
1521 This could be done (the meaning isn't really ambiguous), but doesn't
1522 fit in well with the current parsing framework.
1523 - 32 D registers may be used (also true for VFPv3).
1524 FIXME: Types are ignored in these register lists, which is probably a
1528 parse_vfp_reg_list (char **str, unsigned int *pbase, enum reg_list_els etype)
1532 enum arm_reg_type regtype = 0;
1536 unsigned long mask = 0;
1541 inst.error = _("expecting {");
1550 regtype = REG_TYPE_VFS;
1555 regtype = REG_TYPE_VFD;
1558 case REGLIST_NEON_D:
1559 regtype = REG_TYPE_NDQ;
1563 if (etype != REGLIST_VFP_S)
1565 /* VFPv3 allows 32 D registers. */
1566 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1570 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1573 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1580 base_reg = max_regs;
1584 int setmask = 1, addregs = 1;
1586 new_base = arm_typed_reg_parse (str, regtype, &regtype, NULL);
1588 if (new_base == FAIL)
1590 first_error (_(reg_expected_msgs[regtype]));
1594 if (new_base >= max_regs)
1596 first_error (_("register out of range in list"));
1600 /* Note: a value of 2 * n is returned for the register Q<n>. */
1601 if (regtype == REG_TYPE_NQ)
1607 if (new_base < base_reg)
1608 base_reg = new_base;
1610 if (mask & (setmask << new_base))
1612 first_error (_("invalid register list"));
1616 if ((mask >> new_base) != 0 && ! warned)
1618 as_tsktsk (_("register list not in ascending order"));
1622 mask |= setmask << new_base;
1625 if (**str == '-') /* We have the start of a range expression */
1631 if ((high_range = arm_typed_reg_parse (str, regtype, NULL, NULL))
1634 inst.error = gettext (reg_expected_msgs[regtype]);
1638 if (high_range >= max_regs)
1640 first_error (_("register out of range in list"));
1644 if (regtype == REG_TYPE_NQ)
1645 high_range = high_range + 1;
1647 if (high_range <= new_base)
1649 inst.error = _("register range not in ascending order");
1653 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1655 if (mask & (setmask << new_base))
1657 inst.error = _("invalid register list");
1661 mask |= setmask << new_base;
1666 while (skip_past_comma (str) != FAIL);
1670 /* Sanity check -- should have raised a parse error above. */
1671 if (count == 0 || count > max_regs)
1676 /* Final test -- the registers must be consecutive. */
1678 for (i = 0; i < count; i++)
1680 if ((mask & (1u << i)) == 0)
1682 inst.error = _("non-contiguous register range");
1690 /* True if two alias types are the same. */
1693 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1701 if (a->defined != b->defined)
1704 if ((a->defined & NTA_HASTYPE) != 0
1705 && (a->eltype.type != b->eltype.type
1706 || a->eltype.size != b->eltype.size))
1709 if ((a->defined & NTA_HASINDEX) != 0
1710 && (a->index != b->index))
1716 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1717 The base register is put in *PBASE.
1718 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1720 The register stride (minus one) is put in bit 4 of the return value.
1721 Bits [6:5] encode the list length (minus one).
1722 The type of the list elements is put in *ELTYPE, if non-NULL. */
1724 #define NEON_LANE(X) ((X) & 0xf)
1725 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1726 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
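/* Worked example (illustrative): for the all-lanes list "{d0[], d2[]}" the
   parser below returns NEON_ALL_LANES | ((2 - 1) << 4) | ((2 - 1) << 5),
   i.e. 0x3f: lane code 15, register stride 2, list length 2.  */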
1729 parse_neon_el_struct_list (char **str, unsigned *pbase,
1730 struct neon_type_el *eltype)
1737 int leading_brace = 0;
1738 enum arm_reg_type rtype = REG_TYPE_NDQ;
1740 const char *const incr_error = "register stride must be 1 or 2";
1741 const char *const type_error = "mismatched element/structure types in list";
1742 struct neon_typed_alias firsttype;
1744 if (skip_past_char (&ptr, '{') == SUCCESS)
1749 struct neon_typed_alias atype;
1750 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1754 first_error (_(reg_expected_msgs[rtype]));
1761 if (rtype == REG_TYPE_NQ)
1768 else if (reg_incr == -1)
1770 reg_incr = getreg - base_reg;
1771 if (reg_incr < 1 || reg_incr > 2)
1773 first_error (_(incr_error));
1777 else if (getreg != base_reg + reg_incr * count)
1779 first_error (_(incr_error));
1783 if (!neon_alias_types_same (&atype, &firsttype))
1785 first_error (_(type_error));
1789 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1793 struct neon_typed_alias htype;
1794 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1796 lane = NEON_INTERLEAVE_LANES;
1797 else if (lane != NEON_INTERLEAVE_LANES)
1799 first_error (_(type_error));
1804 else if (reg_incr != 1)
1806 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1810 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1813 first_error (_(reg_expected_msgs[rtype]));
1816 if (!neon_alias_types_same (&htype, &firsttype))
1818 first_error (_(type_error));
1821 count += hireg + dregs - getreg;
1825 /* If we're using Q registers, we can't use [] or [n] syntax. */
1826 if (rtype == REG_TYPE_NQ)
1832 if ((atype.defined & NTA_HASINDEX) != 0)
1836 else if (lane != atype.index)
1838 first_error (_(type_error));
1842 else if (lane == -1)
1843 lane = NEON_INTERLEAVE_LANES;
1844 else if (lane != NEON_INTERLEAVE_LANES)
1846 first_error (_(type_error));
1851 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1853 /* No lane set by [x]. We must be interleaving structures. */
1855 lane = NEON_INTERLEAVE_LANES;
1858 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1859 || (count > 1 && reg_incr == -1))
1861 first_error (_("error parsing element/structure list"));
1865 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1867 first_error (_("expected }"));
1875 *eltype = firsttype.eltype;
1880 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1883 /* Parse an explicit relocation suffix on an expression. This is
1884 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1885 arm_reloc_hsh contains no entries, so this function can only
1886 succeed if there is no () after the word. Returns -1 on error,
1887 BFD_RELOC_UNUSED if there wasn't any suffix. */
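/* For example (illustrative): in ".word sym(GOT)" the "(GOT)" suffix is
   looked up in arm_reloc_hsh and selects the matching relocation, whereas
   a plain ".word sym" takes the BFD_RELOC_UNUSED path.  */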
1889 parse_reloc (char **str)
1891 struct reloc_entry *r;
1895 return BFD_RELOC_UNUSED;
1900 while (*q && *q != ')' && *q != ',')
1905 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1912 /* Directives: register aliases. */
1914 static struct reg_entry *
1915 insert_reg_alias (char *str, int number, int type)
1917 struct reg_entry *new;
1920 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1923 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1925 /* Only warn about a redefinition if it's not defined as the
1927 else if (new->number != number || new->type != type)
1928 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1933 name = xstrdup (str);
1934 new = xmalloc (sizeof (struct reg_entry));
1937 new->number = number;
1939 new->builtin = FALSE;
1942 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1949 insert_neon_reg_alias (char *str, int number, int type,
1950 struct neon_typed_alias *atype)
1952 struct reg_entry *reg = insert_reg_alias (str, number, type);
1956 first_error (_("attempt to redefine typed alias"));
1962 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
1963 *reg->neon = *atype;
1967 /* Look for the .req directive. This is of the form:
1969 new_register_name .req existing_register_name
1971 If we find one, or if it looks sufficiently like one that we want to
1972 handle any error here, return non-zero. Otherwise return zero. */
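/* Illustrative usage (not from the original comment): after "acc .req r4",
   both "acc" and "ACC" may be written wherever r4 is accepted, since the
   code below also enters all-lowercase and all-uppercase copies of the
   alias.  */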
1975 create_register_alias (char * newname, char *p)
1977 struct reg_entry *old;
1978 char *oldname, *nbuf;
1981 /* The input scrubber ensures that whitespace after the mnemonic is
1982 collapsed to single spaces. */
1984 if (strncmp (oldname, " .req ", 6) != 0)
1988 if (*oldname == '\0')
1991 old = hash_find (arm_reg_hsh, oldname);
1994 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1998 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1999 the desired alias name, and p points to its end. If not, then
2000 the desired alias name is in the global original_case_string. */
2001 #ifdef TC_CASE_SENSITIVE
2004 newname = original_case_string;
2005 nlen = strlen (newname);
2008 nbuf = alloca (nlen + 1);
2009 memcpy (nbuf, newname, nlen);
2012 /* Create aliases under the new name as stated; an all-lowercase
2013 version of the new name; and an all-uppercase version of the new
2015 insert_reg_alias (nbuf, old->number, old->type);
2017 for (p = nbuf; *p; p++)
2020 if (strncmp (nbuf, newname, nlen))
2021 insert_reg_alias (nbuf, old->number, old->type);
2023 for (p = nbuf; *p; p++)
2026 if (strncmp (nbuf, newname, nlen))
2027 insert_reg_alias (nbuf, old->number, old->type);
2032 /* Create a Neon typed/indexed register alias using directives, e.g.:
2037 These typed registers can be used instead of the types specified after the
2038 Neon mnemonic, so long as all operands given have types. Types can also be
2039 specified directly, e.g.:
2040 vadd d0.s32, d1.s32, d2.s32
2044 create_neon_reg_alias (char *newname, char *p)
2046 enum arm_reg_type basetype;
2047 struct reg_entry *basereg;
2048 struct reg_entry mybasereg;
2049 struct neon_type ntype;
2050 struct neon_typed_alias typeinfo;
2051 char *namebuf, *nameend;
2054 typeinfo.defined = 0;
2055 typeinfo.eltype.type = NT_invtype;
2056 typeinfo.eltype.size = -1;
2057 typeinfo.index = -1;
2061 if (strncmp (p, " .dn ", 5) == 0)
2062 basetype = REG_TYPE_VFD;
2063 else if (strncmp (p, " .qn ", 5) == 0)
2064 basetype = REG_TYPE_NQ;
2073 basereg = arm_reg_parse_multi (&p);
2075 if (basereg && basereg->type != basetype)
2077 as_bad (_("bad type for register"));
2081 if (basereg == NULL)
2084 /* Try parsing as an integer. */
2085 my_get_expression (&exp, &p, GE_NO_PREFIX);
2086 if (exp.X_op != O_constant)
2088 as_bad (_("expression must be constant"));
2091 basereg = &mybasereg;
2092 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2098 typeinfo = *basereg->neon;
2100 if (parse_neon_type (&ntype, &p) == SUCCESS)
2102 /* We got a type. */
2103 if (typeinfo.defined & NTA_HASTYPE)
2105 as_bad (_("can't redefine the type of a register alias"));
2109 typeinfo.defined |= NTA_HASTYPE;
2110 if (ntype.elems != 1)
2112 as_bad (_("you must specify a single type only"));
2115 typeinfo.eltype = ntype.el[0];
2118 if (skip_past_char (&p, '[') == SUCCESS)
2121 /* We got a scalar index. */
2123 if (typeinfo.defined & NTA_HASINDEX)
2125 as_bad (_("can't redefine the index of a scalar alias"));
2129 my_get_expression (&exp, &p, GE_NO_PREFIX);
2131 if (exp.X_op != O_constant)
2133 as_bad (_("scalar index must be constant"));
2137 typeinfo.defined |= NTA_HASINDEX;
2138 typeinfo.index = exp.X_add_number;
2140 if (skip_past_char (&p, ']') == FAIL)
2142 as_bad (_("expecting ]"));
2147 namelen = nameend - newname;
2148 namebuf = alloca (namelen + 1);
2149 strncpy (namebuf, newname, namelen);
2150 namebuf[namelen] = '\0';
2152 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2153 typeinfo.defined != 0 ? &typeinfo : NULL);
2155 /* Insert name in all uppercase. */
2156 for (p = namebuf; *p; p++)
2159 if (strncmp (namebuf, newname, namelen))
2160 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2161 typeinfo.defined != 0 ? &typeinfo : NULL);
2163 /* Insert name in all lowercase. */
2164 for (p = namebuf; *p; p++)
2167 if (strncmp (namebuf, newname, namelen))
2168 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2169 typeinfo.defined != 0 ? &typeinfo : NULL);
2174 /* Should never be called, as .req goes between the alias and the
2175 register name, not at the beginning of the line. */
2177 s_req (int a ATTRIBUTE_UNUSED)
2179 as_bad (_("invalid syntax for .req directive"));
2183 s_dn (int a ATTRIBUTE_UNUSED)
2185 as_bad (_("invalid syntax for .dn directive"));
2189 s_qn (int a ATTRIBUTE_UNUSED)
2191 as_bad (_("invalid syntax for .qn directive"));
2194 /* The .unreq directive deletes an alias which was previously defined
2195 by .req. For example:
2201 s_unreq (int a ATTRIBUTE_UNUSED)
2206 name = input_line_pointer;
2208 while (*input_line_pointer != 0
2209 && *input_line_pointer != ' '
2210 && *input_line_pointer != '\n')
2211 ++input_line_pointer;
2213 saved_char = *input_line_pointer;
2214 *input_line_pointer = 0;
2217 as_bad (_("invalid syntax for .unreq directive"));
2220 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2223 as_bad (_("unknown register alias '%s'"), name);
2224 else if (reg->builtin)
2225 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2229 hash_delete (arm_reg_hsh, name);
2230 free ((char *) reg->name);
2237 *input_line_pointer = saved_char;
2238 demand_empty_rest_of_line ();
2241 /* Directives: Instruction set selection. */
2244 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2245 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2246 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2247 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
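/* Illustrative example (assumed behaviour): an ARM instruction assembled
   after ".arm"/".code 32" is preceded by an untyped local $a mapping
   symbol, a Thumb instruction after ".thumb"/".code 16" by $t, and data
   emitted by e.g. ".word" by $d, marking the transitions for
   disassemblers and linkers.  */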
2249 static enum mstate mapstate = MAP_UNDEFINED;
2252 mapping_state (enum mstate state)
2255 const char * symname;
2258 if (mapstate == state)
2259 /* The mapping symbol has already been emitted.
2260 There is nothing else to do. */
2269 type = BSF_NO_FLAGS;
2273 type = BSF_NO_FLAGS;
2277 type = BSF_NO_FLAGS;
2285 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2287 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2288 symbol_table_insert (symbolP);
2289 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2294 THUMB_SET_FUNC (symbolP, 0);
2295 ARM_SET_THUMB (symbolP, 0);
2296 ARM_SET_INTERWORK (symbolP, support_interwork);
2300 THUMB_SET_FUNC (symbolP, 1);
2301 ARM_SET_THUMB (symbolP, 1);
2302 ARM_SET_INTERWORK (symbolP, support_interwork);
2311 #define mapping_state(x) /* nothing */
2314 /* Find the real, Thumb encoded start of a Thumb function. */
2317 find_real_start (symbolS * symbolP)
2320 const char * name = S_GET_NAME (symbolP);
2321 symbolS * new_target;
2323 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2324 #define STUB_NAME ".real_start_of"
2329 /* The compiler may generate BL instructions to local labels because
2330 it needs to perform a branch to a far away location. These labels
2331 do not have a corresponding ".real_start_of" label. We check
2332 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2333 the ".real_start_of" convention for nonlocal branches. */
2334 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2337 real_start = ACONCAT ((STUB_NAME, name, NULL));
2338 new_target = symbol_find (real_start);
2340 if (new_target == NULL)
2342 as_warn (_("Failed to find real start of function: %s\n"), name);
2343 new_target = symbolP;
2350 opcode_select (int width)
2357 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2358 as_bad (_("selected processor does not support THUMB opcodes"));
2361 /* No need to force the alignment, since we will have been
2362 coming from ARM mode, which is word-aligned. */
2363 record_alignment (now_seg, 1);
2365 mapping_state (MAP_THUMB);
2371 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2372 as_bad (_("selected processor does not support ARM opcodes"));
2377 frag_align (2, 0, 0);
2379 record_alignment (now_seg, 1);
2381 mapping_state (MAP_ARM);
2385 as_bad (_("invalid instruction size selected (%d)"), width);
2390 s_arm (int ignore ATTRIBUTE_UNUSED)
2393 demand_empty_rest_of_line ();
2397 s_thumb (int ignore ATTRIBUTE_UNUSED)
2400 demand_empty_rest_of_line ();
2404 s_code (int unused ATTRIBUTE_UNUSED)
2408 temp = get_absolute_expression ();
2413 opcode_select (temp);
2417 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2422 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2424 /* If we are not already in thumb mode go into it, EVEN if
2425 the target processor does not support thumb instructions.
2426 This is used by gcc/config/arm/lib1funcs.asm for example
2427 to compile interworking support functions even if the
2428 target processor does not support interworking. */
2432 record_alignment (now_seg, 1);
2435 demand_empty_rest_of_line ();
2439 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2443 /* The following label is the name/address of the start of a Thumb function.
2444 We need to know this for the interworking support. */
2445 label_is_thumb_function_name = TRUE;
2448 /* Perform a .set directive, but also mark the alias as
2449 being a thumb function. */
2452 s_thumb_set (int equiv)
2454 /* XXX the following is a duplicate of the code for s_set() in read.c
2455 We cannot just call that code as we need to get at the symbol that
2462 /* Especial apologies for the random logic:
2463 This just grew, and could be parsed much more simply!
2465 name = input_line_pointer;
2466 delim = get_symbol_end ();
2467 end_name = input_line_pointer;
2470 if (*input_line_pointer != ',')
2473 as_bad (_("expected comma after name \"%s\""), name);
2475 ignore_rest_of_line ();
2479 input_line_pointer++;
2482 if (name[0] == '.' && name[1] == '\0')
2484 /* XXX - this should not happen to .thumb_set. */
2488 if ((symbolP = symbol_find (name)) == NULL
2489 && (symbolP = md_undefined_symbol (name)) == NULL)
2492 /* When doing symbol listings, play games with dummy fragments living
2493 outside the normal fragment chain to record the file and line info
2495 if (listing & LISTING_SYMBOLS)
2497 extern struct list_info_struct * listing_tail;
2498 fragS * dummy_frag = xmalloc (sizeof (fragS));
2500 memset (dummy_frag, 0, sizeof (fragS));
2501 dummy_frag->fr_type = rs_fill;
2502 dummy_frag->line = listing_tail;
2503 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2504 dummy_frag->fr_symbol = symbolP;
2508 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2511 /* "set" symbols are local unless otherwise specified. */
2512 SF_SET_LOCAL (symbolP);
2513 #endif /* OBJ_COFF */
2514 } /* Make a new symbol. */
2516 symbol_table_insert (symbolP);
2521 && S_IS_DEFINED (symbolP)
2522 && S_GET_SEGMENT (symbolP) != reg_section)
2523 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2525 pseudo_set (symbolP);
2527 demand_empty_rest_of_line ();
2529 /* XXX Now we come to the Thumb specific bit of code. */
2531 THUMB_SET_FUNC (symbolP, 1);
2532 ARM_SET_THUMB (symbolP, 1);
2533 #if defined OBJ_ELF || defined OBJ_COFF
2534 ARM_SET_INTERWORK (symbolP, support_interwork);
2538 /* Directives: Mode selection. */
2540 /* .syntax [unified|divided] - choose the new unified syntax
2541 (same for Arm and Thumb encoding, modulo slight differences in what
2542 can be represented) or the old divergent syntax for each mode. */
2544 s_syntax (int unused ATTRIBUTE_UNUSED)
2548 name = input_line_pointer;
2549 delim = get_symbol_end ();
2551 if (!strcasecmp (name, "unified"))
2552 unified_syntax = TRUE;
2553 else if (!strcasecmp (name, "divided"))
2554 unified_syntax = FALSE;
2557 as_bad (_("unrecognized syntax mode \"%s\""), name);
2560 *input_line_pointer = delim;
2561 demand_empty_rest_of_line ();
2564 /* Directives: sectioning and alignment. */
2566 /* Same as s_align_ptwo but align 0 => align 2. */
2569 s_align (int unused ATTRIBUTE_UNUSED)
2573 long max_alignment = 15;
2575 temp = get_absolute_expression ();
2576 if (temp > max_alignment)
2577 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2580 as_bad (_("alignment negative. 0 assumed."));
2584 if (*input_line_pointer == ',')
2586 input_line_pointer++;
2587 temp_fill = get_absolute_expression ();
2595 /* Only make a frag if we HAVE to. */
2596 if (temp && !need_pass_2)
2597 frag_align (temp, (int) temp_fill, 0);
2598 demand_empty_rest_of_line ();
2600 record_alignment (now_seg, temp);
2604 s_bss (int ignore ATTRIBUTE_UNUSED)
2606 /* We don't support putting frags in the BSS segment; we fake it by
2607 marking in_bss, then looking at s_skip for clues. */
2608 subseg_set (bss_section, 0);
2609 demand_empty_rest_of_line ();
2610 mapping_state (MAP_DATA);
2614 s_even (int ignore ATTRIBUTE_UNUSED)
2616 /* Never make a frag if we expect an extra pass. */
2618 frag_align (1, 0, 0);
2620 record_alignment (now_seg, 1);
2622 demand_empty_rest_of_line ();
2625 /* Directives: Literal pools. */
2627 static literal_pool *
2628 find_literal_pool (void)
2630 literal_pool * pool;
2632 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2634 if (pool->section == now_seg
2635 && pool->sub_section == now_subseg)
2642 static literal_pool *
2643 find_or_make_literal_pool (void)
2645 /* Next literal pool ID number. */
2646 static unsigned int latest_pool_num = 1;
2647 literal_pool * pool;
2649 pool = find_literal_pool ();
2653 /* Create a new pool. */
2654 pool = xmalloc (sizeof (* pool));
2658 pool->next_free_entry = 0;
2659 pool->section = now_seg;
2660 pool->sub_section = now_subseg;
2661 pool->next = list_of_pools;
2662 pool->symbol = NULL;
2664 /* Add it to the list. */
2665 list_of_pools = pool;
2668 /* New pools, and emptied pools, will have a NULL symbol. */
2669 if (pool->symbol == NULL)
2671 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2672 (valueT) 0, &zero_address_frag);
2673 pool->id = latest_pool_num ++;
2680 /* Add the literal in the global 'inst'
2681 structure to the relevant literal pool. */
2684 add_to_lit_pool (void)
2686 literal_pool * pool;
2689 pool = find_or_make_literal_pool ();
2691 /* Check if this literal value is already in the pool. */
2692 for (entry = 0; entry < pool->next_free_entry; entry ++)
2694 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2695 && (inst.reloc.exp.X_op == O_constant)
2696 && (pool->literals[entry].X_add_number
2697 == inst.reloc.exp.X_add_number)
2698 && (pool->literals[entry].X_unsigned
2699 == inst.reloc.exp.X_unsigned))
2702 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2703 && (inst.reloc.exp.X_op == O_symbol)
2704 && (pool->literals[entry].X_add_number
2705 == inst.reloc.exp.X_add_number)
2706 && (pool->literals[entry].X_add_symbol
2707 == inst.reloc.exp.X_add_symbol)
2708 && (pool->literals[entry].X_op_symbol
2709 == inst.reloc.exp.X_op_symbol))
2713 /* Do we need to create a new entry? */
2714 if (entry == pool->next_free_entry)
2716 if (entry >= MAX_LITERAL_POOL_SIZE)
2718 inst.error = _("literal pool overflow");
2722 pool->literals[entry] = inst.reloc.exp;
2723 pool->next_free_entry += 1;
2726 inst.reloc.exp.X_op = O_symbol;
2727 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2728 inst.reloc.exp.X_add_symbol = pool->symbol;
2733 /* Can't use symbol_new here, so have to create a symbol and then at
2734 a later date assign it a value. That's what these functions do. */
2737 symbol_locate (symbolS * symbolP,
2738 const char * name, /* It is copied, the caller can modify. */
2739 segT segment, /* Segment identifier (SEG_<something>). */
2740 valueT valu, /* Symbol value. */
2741 fragS * frag) /* Associated fragment. */
2743 unsigned int name_length;
2744 char * preserved_copy_of_name;
2746 name_length = strlen (name) + 1; /* +1 for \0. */
2747 obstack_grow (&notes, name, name_length);
2748 preserved_copy_of_name = obstack_finish (&notes);
2750 #ifdef tc_canonicalize_symbol_name
2751 preserved_copy_of_name =
2752 tc_canonicalize_symbol_name (preserved_copy_of_name);
2755 S_SET_NAME (symbolP, preserved_copy_of_name);
2757 S_SET_SEGMENT (symbolP, segment);
2758 S_SET_VALUE (symbolP, valu);
2759 symbol_clear_list_pointers (symbolP);
2761 symbol_set_frag (symbolP, frag);
2763 /* Link to end of symbol chain. */
2765 extern int symbol_table_frozen;
2767 if (symbol_table_frozen)
2771 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2773 obj_symbol_new_hook (symbolP);
2775 #ifdef tc_symbol_new_hook
2776 tc_symbol_new_hook (symbolP);
2780 verify_symbol_chain (symbol_rootP, symbol_lastP);
2781 #endif /* DEBUG_SYMS */
2786 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2789 literal_pool * pool;
2792 pool = find_literal_pool ();
2794 || pool->symbol == NULL
2795 || pool->next_free_entry == 0)
2798 mapping_state (MAP_DATA);
2800 /* Align the pool, since it will be accessed as words.
2801 Only make a frag if we have to. */
2803 frag_align (2, 0, 0);
2805 record_alignment (now_seg, 2);
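/* The \002 byte in the name below presumably guarantees that the generated
   pool label can never clash with a symbol the user could write.  */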
2807 sprintf (sym_name, "$$lit_\002%x", pool->id);
2809 symbol_locate (pool->symbol, sym_name, now_seg,
2810 (valueT) frag_now_fix (), frag_now);
2811 symbol_table_insert (pool->symbol);
2813 ARM_SET_THUMB (pool->symbol, thumb_mode);
2815 #if defined OBJ_COFF || defined OBJ_ELF
2816 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2819 for (entry = 0; entry < pool->next_free_entry; entry ++)
2820 /* First output the expression in the instruction to the pool. */
2821 emit_expr (&(pool->literals[entry]), 4); /* .word */
2823 /* Mark the pool as empty. */
2824 pool->next_free_entry = 0;
2825 pool->symbol = NULL;
2829 /* Forward declarations for functions below, in the MD interface
2831 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2832 static valueT create_unwind_entry (int);
2833 static void start_unwind_section (const segT, int);
2834 static void add_unwind_opcode (valueT, int);
2835 static void flush_pending_unwind (void);
2837 /* Directives: Data. */
2840 s_arm_elf_cons (int nbytes)
2844 #ifdef md_flush_pending_output
2845 md_flush_pending_output ();
2848 if (is_it_end_of_statement ())
2850 demand_empty_rest_of_line ();
2854 #ifdef md_cons_align
2855 md_cons_align (nbytes);
2858 mapping_state (MAP_DATA);
2862 char *base = input_line_pointer;
2866 if (exp.X_op != O_symbol)
2867 emit_expr (&exp, (unsigned int) nbytes);
2870 char *before_reloc = input_line_pointer;
2871 reloc = parse_reloc (&input_line_pointer);
2874 as_bad (_("unrecognized relocation suffix"));
2875 ignore_rest_of_line ();
2878 else if (reloc == BFD_RELOC_UNUSED)
2879 emit_expr (&exp, (unsigned int) nbytes);
2882 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2883 int size = bfd_get_reloc_size (howto);
2885 if (reloc == BFD_RELOC_ARM_PLT32)
2887 as_bad (_("(plt) is only valid on branch targets"));
2888 reloc = BFD_RELOC_UNUSED;
2893 as_bad (_("%s relocations do not fit in %d bytes"),
2894 howto->name, nbytes);
2897 /* We've parsed an expression stopping at O_symbol.
2898 But there may be more expression left now that we
2899 have parsed the relocation marker. Parse it again.
2900 XXX Surely there is a cleaner way to do this. */
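/* The expression text is slid forward over the relocation suffix so that
   it becomes contiguous with whatever follows the suffix; the combined
   string is re-parsed and the original buffer contents are restored
   afterwards.  */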
2901 char *p = input_line_pointer;
2903 char *save_buf = alloca (input_line_pointer - base);
2904 memcpy (save_buf, base, input_line_pointer - base);
2905 memmove (base + (input_line_pointer - before_reloc),
2906 base, before_reloc - base);
2908 input_line_pointer = base + (input_line_pointer-before_reloc);
2910 memcpy (base, save_buf, p - base);
2912 offset = nbytes - size;
2913 p = frag_more ((int) nbytes);
2914 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2915 size, &exp, 0, reloc);
2920 while (*input_line_pointer++ == ',');
2922 /* Put terminator back into stream. */
2923 input_line_pointer --;
2924 demand_empty_rest_of_line ();
2928 /* Parse a .rel31 directive. */
2931 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2938 if (*input_line_pointer == '1')
2939 highbit = 0x80000000;
2940 else if (*input_line_pointer != '0')
2941 as_bad (_("expected 0 or 1"));
2943 input_line_pointer++;
2944 if (*input_line_pointer != ',')
2945 as_bad (_("missing comma"));
2946 input_line_pointer++;
2948 #ifdef md_flush_pending_output
2949 md_flush_pending_output ();
2952 #ifdef md_cons_align
2956 mapping_state (MAP_DATA);
2961 md_number_to_chars (p, highbit, 4);
2962 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2963 BFD_RELOC_ARM_PREL31);
2965 demand_empty_rest_of_line ();
2968 /* Directives: AEABI stack-unwind tables. */
2970 /* Parse an unwind_fnstart directive. Simply records the current location. */
2973 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
2975 demand_empty_rest_of_line ();
2976 /* Mark the start of the function. */
2977 unwind.proc_start = expr_build_dot ();
2979 /* Reset the rest of the unwind info. */
2980 unwind.opcode_count = 0;
2981 unwind.table_entry = NULL;
2982 unwind.personality_routine = NULL;
2983 unwind.personality_index = -1;
2984 unwind.frame_size = 0;
2985 unwind.fp_offset = 0;
2988 unwind.sp_restored = 0;
2992 /* Parse a handlerdata directive. Creates the exception handling table entry
2993 for the function. */
2996 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
2998 demand_empty_rest_of_line ();
2999 if (unwind.table_entry)
3000 as_bad (_("duplicate .handlerdata directive"));
3002 create_unwind_entry (1);
3005 /* Parse an unwind_fnend directive. Generates the index table entry. */
3008 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3014 demand_empty_rest_of_line ();
3016 /* Add eh table entry. */
3017 if (unwind.table_entry == NULL)
3018 val = create_unwind_entry (0);
3022 /* Add index table entry. This is two words. */
3023 start_unwind_section (unwind.saved_seg, 1);
3024 frag_align (2, 0, 0);
3025 record_alignment (now_seg, 2);
3027 ptr = frag_more (8);
3028 where = frag_now_fix () - 8;
3030 /* Self relative offset of the function start. */
3031 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3032 BFD_RELOC_ARM_PREL31);
3034 /* Indicate dependency on EHABI-defined personality routines to the
3035 linker, if it hasn't been done already. */
3036 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3037 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3039 static const char *const name[] = {
3040 "__aeabi_unwind_cpp_pr0",
3041 "__aeabi_unwind_cpp_pr1",
3042 "__aeabi_unwind_cpp_pr2"
3044 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3045 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3046 marked_pr_dependency |= 1 << unwind.personality_index;
3047 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3048 = marked_pr_dependency;
3052 /* Inline exception table entry. */
3053 md_number_to_chars (ptr + 4, val, 4);
3055 /* Self relative offset of the table entry. */
3056 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3057 BFD_RELOC_ARM_PREL31);
3059 /* Restore the original section. */
3060 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3064 /* Parse an unwind_cantunwind directive. */
3067 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3069 demand_empty_rest_of_line ();
3070 if (unwind.personality_routine || unwind.personality_index != -1)
3071 as_bad (_("personality routine specified for cantunwind frame"));
3073 unwind.personality_index = -2;
3077 /* Parse a personalityindex directive. */
3080 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3084 if (unwind.personality_routine || unwind.personality_index != -1)
3085 as_bad (_("duplicate .personalityindex directive"));
3089 if (exp.X_op != O_constant
3090 || exp.X_add_number < 0 || exp.X_add_number > 15)
3092 as_bad (_("bad personality routine number"));
3093 ignore_rest_of_line ();
3097 unwind.personality_index = exp.X_add_number;
3099 demand_empty_rest_of_line ();
3103 /* Parse a personality directive. */
3106 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3110 if (unwind.personality_routine || unwind.personality_index != -1)
3111 as_bad (_("duplicate .personality directive"));
3113 name = input_line_pointer;
3114 c = get_symbol_end ();
3115 p = input_line_pointer;
3116 unwind.personality_routine = symbol_find_or_make (name);
3118 demand_empty_rest_of_line ();
3122 /* Parse a directive saving core registers. */
3125 s_arm_unwind_save_core (void)
3131 range = parse_reg_list (&input_line_pointer);
3134 as_bad (_("expected register list"));
3135 ignore_rest_of_line ();
3139 demand_empty_rest_of_line ();
3141 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3142 into .unwind_save {..., sp...}. We aren't bothered about the value of
3143 ip because it is clobbered by calls. */
3144 if (unwind.sp_restored && unwind.fp_reg == 12
3145 && (range & 0x3000) == 0x1000)
3147 unwind.opcode_count--;
3148 unwind.sp_restored = 0;
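/* Clear the ip bit (bit 12) in the mask and set the sp bit (bit 13)
   instead.  */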
3149 range = (range | 0x2000) & ~0x1000;
3150 unwind.pending_offset = 0;
3156 /* See if we can use the short opcodes. These pop a block of up to 8
3157 registers starting with r4, plus maybe r14. */
3158 for (n = 0; n < 8; n++)
3160 /* Break at the first non-saved register. */
3161 if ((range & (1 << (n + 4))) == 0)
3164 /* See if there are any other bits set. */
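/* A register above the contiguous r4-based block, other than lr, forces
   the long form.  */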
3165 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3167 /* Use the long form. */
3168 op = 0x8000 | ((range >> 4) & 0xfff);
3169 add_unwind_opcode (op, 2);
3173 /* Use the short form. */
3175 op = 0xa8; /* Pop r14. */
3177 op = 0xa0; /* Do not pop r14. */
3179 add_unwind_opcode (op, 1);
3186 op = 0xb100 | (range & 0xf);
3187 add_unwind_opcode (op, 2);
3190 /* Record the number of bytes pushed. */
3191 for (n = 0; n < 16; n++)
3193 if (range & (1 << n))
3194 unwind.frame_size += 4;
3199 /* Parse a directive saving FPA registers. */
3202 s_arm_unwind_save_fpa (int reg)
3208 /* Get the number of registers to transfer. */
3209 if (skip_past_comma (&input_line_pointer) != FAIL)
3212 exp.X_op = O_illegal;
3214 if (exp.X_op != O_constant)
3216 as_bad (_("expected , <constant>"));
3217 ignore_rest_of_line ();
3221 num_regs = exp.X_add_number;
3223 if (num_regs < 1 || num_regs > 4)
3225 as_bad (_("number of registers must be in the range [1:4]"));
3226 ignore_rest_of_line ();
3230 demand_empty_rest_of_line ();
3235 op = 0xb4 | (num_regs - 1);
3236 add_unwind_opcode (op, 1);
3241 op = 0xc800 | (reg << 4) | (num_regs - 1);
3242 add_unwind_opcode (op, 2);
3244 unwind.frame_size += num_regs * 12;
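/* FPA registers are saved in the 12-byte extended format, hence the
   factor of twelve above.  */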
3248 /* Parse a directive saving VFP registers. */
3251 s_arm_unwind_save_vfp (void)
3257 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3260 as_bad (_("expected register list"));
3261 ignore_rest_of_line ();
3265 demand_empty_rest_of_line ();
3270 op = 0xb8 | (count - 1);
3271 add_unwind_opcode (op, 1);
3276 op = 0xb300 | (reg << 4) | (count - 1);
3277 add_unwind_opcode (op, 2);
3279 unwind.frame_size += count * 8 + 4;
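/* Each D register occupies two words; the save format implied by this
   opcode (FSTMFDX-style) adds one extra word, hence count * 8 + 4.  */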
3283 /* Parse a directive saving iWMMXt data registers. */
3286 s_arm_unwind_save_mmxwr (void)
3294 if (*input_line_pointer == '{')
3295 input_line_pointer++;
3299 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3303 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3308 as_tsktsk (_("register list not in ascending order"));
3311 if (*input_line_pointer == '-')
3313 input_line_pointer++;
3314 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3317 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3320 else if (reg >= hi_reg)
3322 as_bad (_("bad register range"));
3325 for (; reg < hi_reg; reg++)
3329 while (skip_past_comma (&input_line_pointer) != FAIL);
3331 if (*input_line_pointer == '}')
3332 input_line_pointer++;
3334 demand_empty_rest_of_line ();
3336 /* Generate any deferred opcodes because we're going to be looking at
3338 flush_pending_unwind ();
3340 for (i = 0; i < 16; i++)
3342 if (mask & (1 << i))
3343 unwind.frame_size += 8;
3346 /* Attempt to combine with a previous opcode. We do this because gcc
3347 likes to output separate unwind directives for a single block of
3349 if (unwind.opcode_count > 0)
3351 i = unwind.opcodes[unwind.opcode_count - 1];
3352 if ((i & 0xf8) == 0xc0)
3355 /* Only merge if the blocks are contiguous. */
3358 if ((mask & 0xfe00) == (1 << 9))
3360 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3361 unwind.opcode_count--;
3364 else if (i == 6 && unwind.opcode_count >= 2)
3366 i = unwind.opcodes[unwind.opcode_count - 2];
3370 op = 0xffff << (reg - 1);
3372 || ((mask & op) == (1u << (reg - 1))))
3374 op = (1 << (reg + i + 1)) - 1;
3375 op &= ~((1 << reg) - 1);
3377 unwind.opcode_count -= 2;
3384 /* We want to generate opcodes in the order the registers have been
3385 saved, i.e. descending order. */
3386 for (reg = 15; reg >= -1; reg--)
3388 /* Save registers in blocks. */
3390 || !(mask & (1 << reg)))
3392 /* We found an unsaved reg. Generate opcodes to save the
3393 preceding block. */
3399 op = 0xc0 | (hi_reg - 10);
3400 add_unwind_opcode (op, 1);
3405 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3406 add_unwind_opcode (op, 2);
3415 ignore_rest_of_line ();
3419 s_arm_unwind_save_mmxwcg (void)
3426 if (*input_line_pointer == '{')
3427 input_line_pointer++;
3431 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3435 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3441 as_tsktsk (_("register list not in ascending order"));
3444 if (*input_line_pointer == '-')
3446 input_line_pointer++;
3447 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3450 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3453 else if (reg >= hi_reg)
3455 as_bad (_("bad register range"));
3458 for (; reg < hi_reg; reg++)
3462 while (skip_past_comma (&input_line_pointer) != FAIL);
3464 if (*input_line_pointer == '}')
3465 input_line_pointer++;
3467 demand_empty_rest_of_line ();
3469 /* Generate any deferred opcodes because we're going to be looking at
3471 flush_pending_unwind ();
3473 for (reg = 0; reg < 16; reg++)
3475 if (mask & (1 << reg))
3476 unwind.frame_size += 4;
3479 add_unwind_opcode (op, 2);
3482 ignore_rest_of_line ();
3486 /* Parse an unwind_save directive. */
3489 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED)
3492 struct reg_entry *reg;
3493 bfd_boolean had_brace = FALSE;
3495 /* Figure out what sort of save we have. */
3496 peek = input_line_pointer;
3504 reg = arm_reg_parse_multi (&peek);
3508 as_bad (_("register expected"));
3509 ignore_rest_of_line ();
3518 as_bad (_("FPA .unwind_save does not take a register list"));
3519 ignore_rest_of_line ();
3522 s_arm_unwind_save_fpa (reg->number);
3525 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3526 case REG_TYPE_VFD: s_arm_unwind_save_vfp (); return;
3527 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3528 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3531 as_bad (_(".unwind_save does not support this kind of register"));
3532 ignore_rest_of_line ();
3537 /* Parse an unwind_movsp directive. */
3540 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3545 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3548 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3549 ignore_rest_of_line ();
3552 demand_empty_rest_of_line ();
3554 if (reg == REG_SP || reg == REG_PC)
3556 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3560 if (unwind.fp_reg != REG_SP)
3561 as_bad (_("unexpected .unwind_movsp directive"));
3563 /* Generate opcode to restore the value. */
3565 add_unwind_opcode (op, 1);
3567 /* Record the information for later. */
3568 unwind.fp_reg = reg;
3569 unwind.fp_offset = unwind.frame_size;
3570 unwind.sp_restored = 1;
3573 /* Parse an unwind_pad directive. */
3576 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3580 if (immediate_for_directive (&offset) == FAIL)
3585 as_bad (_("stack increment must be multiple of 4"));
3586 ignore_rest_of_line ();
3590 /* Don't generate any opcodes, just record the details for later. */
3591 unwind.frame_size += offset;
3592 unwind.pending_offset += offset;
3594 demand_empty_rest_of_line ();
3597 /* Parse an unwind_setfp directive. */
3600 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3606 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3607 if (skip_past_comma (&input_line_pointer) == FAIL)
3610 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3612 if (fp_reg == FAIL || sp_reg == FAIL)
3614 as_bad (_("expected <reg>, <reg>"));
3615 ignore_rest_of_line ();
3619 /* Optional constant. */
3620 if (skip_past_comma (&input_line_pointer) != FAIL)
3622 if (immediate_for_directive (&offset) == FAIL)
3628 demand_empty_rest_of_line ();
3630 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3632 as_bad (_("register must be either sp or set by a previous"
3633 "unwind_movsp directive"));
3637 /* Don't generate any opcodes, just record the information for later. */
3638 unwind.fp_reg = fp_reg;
3641 unwind.fp_offset = unwind.frame_size - offset;
3643 unwind.fp_offset -= offset;
3646 /* Parse an unwind_raw directive. */
3649 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3652 /* This is an arbitrary limit. */
3653 unsigned char op[16];
3657 if (exp.X_op == O_constant
3658 && skip_past_comma (&input_line_pointer) != FAIL)
3660 unwind.frame_size += exp.X_add_number;
3664 exp.X_op = O_illegal;
3666 if (exp.X_op != O_constant)
3668 as_bad (_("expected <offset>, <opcode>"));
3669 ignore_rest_of_line ();
3675 /* Parse the opcode. */
3680 as_bad (_("unwind opcode too long"));
3681 ignore_rest_of_line ();
3683 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3685 as_bad (_("invalid unwind opcode"));
3686 ignore_rest_of_line ();
3689 op[count++] = exp.X_add_number;
3691 /* Parse the next byte. */
3692 if (skip_past_comma (&input_line_pointer) == FAIL)
3698 /* Add the opcode bytes in reverse order. */
3700 add_unwind_opcode (op[count], 1);
3702 demand_empty_rest_of_line ();
3706 /* Parse a .eabi_attribute directive. */
3709 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3712 bfd_boolean is_string;
3719 if (exp.X_op != O_constant)
3722 tag = exp.X_add_number;
3723 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3728 if (skip_past_comma (&input_line_pointer) == FAIL)
3730 if (tag == 32 || !is_string)
3733 if (exp.X_op != O_constant)
3735 as_bad (_("expected numeric constant"));
3736 ignore_rest_of_line ();
3739 i = exp.X_add_number;
3741 if (tag == Tag_compatibility
3742 && skip_past_comma (&input_line_pointer) == FAIL)
3744 as_bad (_("expected comma"));
3745 ignore_rest_of_line ();
3750 skip_whitespace (input_line_pointer);
3751 if (*input_line_pointer != '"')
3753 input_line_pointer++;
3754 s = input_line_pointer;
3755 while (*input_line_pointer && *input_line_pointer != '"')
3756 input_line_pointer++;
3757 if (*input_line_pointer != '"')
3759 saved_char = *input_line_pointer;
3760 *input_line_pointer = 0;
3768 if (tag == Tag_compatibility)
3769 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3771 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3773 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3777 *input_line_pointer = saved_char;
3778 input_line_pointer++;
3780 demand_empty_rest_of_line ();
3783 as_bad (_("bad string constant"));
3784 ignore_rest_of_line ();
3787 as_bad (_("expected <tag> , <value>"));
3788 ignore_rest_of_line ();
3790 #endif /* OBJ_ELF */
3792 static void s_arm_arch (int);
3793 static void s_arm_cpu (int);
3794 static void s_arm_fpu (int);
3796 /* This table describes all the machine specific pseudo-ops the assembler
3797 has to support. The fields are:
3798 pseudo-op name without dot
3799 function to call to execute this pseudo-op
3800 Integer arg to pass to the function. */
3802 const pseudo_typeS md_pseudo_table[] =
3804 /* Never called because '.req' does not start a line. */
3805 { "req", s_req, 0 },
3806 /* Following two are likewise never called. */
3809 { "unreq", s_unreq, 0 },
3810 { "bss", s_bss, 0 },
3811 { "align", s_align, 0 },
3812 { "arm", s_arm, 0 },
3813 { "thumb", s_thumb, 0 },
3814 { "code", s_code, 0 },
3815 { "force_thumb", s_force_thumb, 0 },
3816 { "thumb_func", s_thumb_func, 0 },
3817 { "thumb_set", s_thumb_set, 0 },
3818 { "even", s_even, 0 },
3819 { "ltorg", s_ltorg, 0 },
3820 { "pool", s_ltorg, 0 },
3821 { "syntax", s_syntax, 0 },
3822 { "cpu", s_arm_cpu, 0 },
3823 { "arch", s_arm_arch, 0 },
3824 { "fpu", s_arm_fpu, 0 },
3826 { "word", s_arm_elf_cons, 4 },
3827 { "long", s_arm_elf_cons, 4 },
3828 { "rel31", s_arm_rel31, 0 },
3829 { "fnstart", s_arm_unwind_fnstart, 0 },
3830 { "fnend", s_arm_unwind_fnend, 0 },
3831 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3832 { "personality", s_arm_unwind_personality, 0 },
3833 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3834 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3835 { "save", s_arm_unwind_save, 0 },
3836 { "movsp", s_arm_unwind_movsp, 0 },
3837 { "pad", s_arm_unwind_pad, 0 },
3838 { "setfp", s_arm_unwind_setfp, 0 },
3839 { "unwind_raw", s_arm_unwind_raw, 0 },
3840 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3844 { "extend", float_cons, 'x' },
3845 { "ldouble", float_cons, 'x' },
3846 { "packed", float_cons, 'p' },
3850 /* Parser functions used exclusively in instruction operands. */
3852 /* Generic immediate-value read function for use in insn parsing.
3853 STR points to the beginning of the immediate (the leading #);
3854 VAL receives the value; if the value is outside [MIN, MAX]
3855 issue an error. PREFIX_OPT is true if the immediate prefix is
3859 parse_immediate (char **str, int *val, int min, int max,
3860 bfd_boolean prefix_opt)
3863 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3864 if (exp.X_op != O_constant)
3866 inst.error = _("constant expression required");
3870 if (exp.X_add_number < min || exp.X_add_number > max)
3872 inst.error = _("immediate value out of range");
3876 *val = exp.X_add_number;
3880 /* Less-generic immediate-value read function with the possibility of loading a
3881 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3882 instructions. Puts the result directly in inst.operands[i]. */
3885 parse_big_immediate (char **str, int i)
3890 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
3892 if (exp.X_op == O_constant)
3893 inst.operands[i].imm = exp.X_add_number;
3894 else if (exp.X_op == O_big
3895 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
3896 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
3898 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
3899 /* Bignums have their least significant bits in
3900 generic_bignum[0]. Make sure we put 32 bits in imm and
3901 32 bits in reg, in a (hopefully) portable way. */
3902 assert (parts != 0);
3903 inst.operands[i].imm = 0;
3904 for (j = 0; j < parts; j++, idx++)
3905 inst.operands[i].imm |= generic_bignum[idx]
3906 << (LITTLENUM_NUMBER_OF_BITS * j);
3907 inst.operands[i].reg = 0;
3908 for (j = 0; j < parts; j++, idx++)
3909 inst.operands[i].reg |= generic_bignum[idx]
3910 << (LITTLENUM_NUMBER_OF_BITS * j);
3911 inst.operands[i].regisimm = 1;
3921 /* Returns the pseudo-register number of an FPA immediate constant,
3922 or FAIL if there isn't a valid constant here. */
3925 parse_fpa_immediate (char ** str)
3927 LITTLENUM_TYPE words[MAX_LITTLENUMS];
3933 /* First try to match exact strings; this guarantees that some
3934 formats will work even for cross assembly. */
3936 for (i = 0; fp_const[i]; i++)
3938 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
3942 *str += strlen (fp_const[i]);
3943 if (is_end_of_line[(unsigned char) **str])
3949 /* Just because we didn't get a match doesn't mean that the constant
3950 isn't valid, just that it is in a format that we don't
3951 automatically recognize. Try parsing it with the standard
3952 expression routines. */
3954 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
3956 /* Look for a raw floating point number. */
3957 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
3958 && is_end_of_line[(unsigned char) *save_in])
3960 for (i = 0; i < NUM_FLOAT_VALS; i++)
3962 for (j = 0; j < MAX_LITTLENUMS; j++)
3964 if (words[j] != fp_values[i][j])
3968 if (j == MAX_LITTLENUMS)
3976 /* Try to parse a more complex expression; this will probably fail
3977 unless the code uses a floating point prefix (e.g. "0f").
3978 save_in = input_line_pointer;
3979 input_line_pointer = *str;
3980 if (expression (&exp) == absolute_section
3981 && exp.X_op == O_big
3982 && exp.X_add_number < 0)
3984 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3986 if (gen_to_words (words, 5, (long) 15) == 0)
3988 for (i = 0; i < NUM_FLOAT_VALS; i++)
3990 for (j = 0; j < MAX_LITTLENUMS; j++)
3992 if (words[j] != fp_values[i][j])
3996 if (j == MAX_LITTLENUMS)
3998 *str = input_line_pointer;
3999 input_line_pointer = save_in;
4006 *str = input_line_pointer;
4007 input_line_pointer = save_in;
4008 inst.error = _("invalid FPA immediate expression");
4012 /* Returns 1 if a number has "quarter-precision" float format
4013 0baBbbbbbc defgh000 00000000 00000000. */
4016 is_quarter_float (unsigned imm)
4018 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4019 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
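/* For example 0x3f800000 (1.0f) satisfies the pattern above, whereas
   0x42800000 (64.0f) does not, because its exponent lies outside the
   representable range.  */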
4022 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4023 0baBbbbbbc defgh000 00000000 00000000.
4024 The minus-zero case needs special handling, since it can't be encoded in the
4025 "quarter-precision" float format, but can nonetheless be loaded as an integer
4029 parse_qfloat_immediate (char **ccp, int *immed)
4032 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4034 skip_past_char (&str, '#');
4036 if ((str = atof_ieee (str, 's', words)) != NULL)
4038 unsigned fpword = 0;
4041 /* Our FP word must be 32 bits (single-precision FP). */
4042 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4044 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4048 if (is_quarter_float (fpword) || fpword == 0x80000000)
4061 /* Shift operands. */
4064 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4067 struct asm_shift_name
4070 enum shift_kind kind;
4073 /* Third argument to parse_shift. */
4074 enum parse_shift_mode
4076 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4077 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4078 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4079 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4080 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4083 /* Parse a <shift> specifier on an ARM data processing instruction.
4084 This has three forms:
4086 (LSL|LSR|ASL|ASR|ROR) Rs
4087 (LSL|LSR|ASL|ASR|ROR) #imm
4090 Note that ASL is assimilated to LSL in the instruction encoding, and
4091 RRX to ROR #0 (which cannot be written as such). */
4094 parse_shift (char **str, int i, enum parse_shift_mode mode)
4096 const struct asm_shift_name *shift_name;
4097 enum shift_kind shift;
4102 for (p = *str; ISALPHA (*p); p++)
4107 inst.error = _("shift expression expected");
4111 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4113 if (shift_name == NULL)
4115 inst.error = _("shift expression expected");
4119 shift = shift_name->kind;
4123 case NO_SHIFT_RESTRICT:
4124 case SHIFT_IMMEDIATE: break;
4126 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4127 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4129 inst.error = _("'LSL' or 'ASR' required");
4134 case SHIFT_LSL_IMMEDIATE:
4135 if (shift != SHIFT_LSL)
4137 inst.error = _("'LSL' required");
4142 case SHIFT_ASR_IMMEDIATE:
4143 if (shift != SHIFT_ASR)
4145 inst.error = _("'ASR' required");
4153 if (shift != SHIFT_RRX)
4155 /* Whitespace can appear here if the next thing is a bare digit. */
4156 skip_whitespace (p);
4158 if (mode == NO_SHIFT_RESTRICT
4159 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4161 inst.operands[i].imm = reg;
4162 inst.operands[i].immisreg = 1;
4164 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4167 inst.operands[i].shift_kind = shift;
4168 inst.operands[i].shifted = 1;
4173 /* Parse a <shifter_operand> for an ARM data processing instruction:
4176 #<immediate>, <rotate>
4180 where <shift> is defined by parse_shift above, and <rotate> is a
4181 multiple of 2 between 0 and 30. Validation of immediate operands
4182 is deferred to md_apply_fix. */
4185 parse_shifter_operand (char **str, int i)
4190 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4192 inst.operands[i].reg = value;
4193 inst.operands[i].isreg = 1;
4195 /* parse_shift will override this if appropriate */
4196 inst.reloc.exp.X_op = O_constant;
4197 inst.reloc.exp.X_add_number = 0;
4199 if (skip_past_comma (str) == FAIL)
4202 /* Shift operation on register. */
4203 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4206 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4209 if (skip_past_comma (str) == SUCCESS)
4211 /* #x, y -- i.e. explicit rotation by Y. */
4212 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4215 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4217 inst.error = _("constant expression expected");
4221 value = expr.X_add_number;
4222 if (value < 0 || value > 30 || value % 2 != 0)
4224 inst.error = _("invalid rotation");
4227 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4229 inst.error = _("invalid constant");
4233 /* Convert to decoded value. md_apply_fix will put it back. */
4234 inst.reloc.exp.X_add_number
4235 = (((inst.reloc.exp.X_add_number << (32 - value))
4236 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
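/* That is, the 8-bit constant is rotated right by the given even amount,
   recovering the full 32-bit value the programmer wrote.  */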
4239 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4240 inst.reloc.pc_rel = 0;
4244 /* Parse all forms of an ARM address expression. Information is written
4245 to inst.operands[i] and/or inst.reloc.
4247 Preindexed addressing (.preind=1):
4249 [Rn, #offset] .reg=Rn .reloc.exp=offset
4250 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4251 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4252 .shift_kind=shift .reloc.exp=shift_imm
4254 These three may have a trailing ! which causes .writeback to be set also.
4256 Postindexed addressing (.postind=1, .writeback=1):
4258 [Rn], #offset .reg=Rn .reloc.exp=offset
4259 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4260 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4261 .shift_kind=shift .reloc.exp=shift_imm
4263 Unindexed addressing (.preind=0, .postind=0):
4265 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4269 [Rn]{!} shorthand for [Rn,#0]{!}
4270 =immediate .isreg=0 .reloc.exp=immediate
4271 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4273 It is the caller's responsibility to check for addressing modes not
4274 supported by the instruction, and to set inst.reloc.type. */
4277 parse_address (char **str, int i)
4282 if (skip_past_char (&p, '[') == FAIL)
4284 if (skip_past_char (&p, '=') == FAIL)
4286 /* bare address - translate to PC-relative offset */
4287 inst.reloc.pc_rel = 1;
4288 inst.operands[i].reg = REG_PC;
4289 inst.operands[i].isreg = 1;
4290 inst.operands[i].preind = 1;
4292 /* else a load-constant pseudo op, no special treatment needed here */
4294 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4301 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4303 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4306 inst.operands[i].reg = reg;
4307 inst.operands[i].isreg = 1;
4309 if (skip_past_comma (&p) == SUCCESS)
4311 inst.operands[i].preind = 1;
4314 else if (*p == '-') p++, inst.operands[i].negative = 1;
4316 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4318 inst.operands[i].imm = reg;
4319 inst.operands[i].immisreg = 1;
4321 if (skip_past_comma (&p) == SUCCESS)
4322 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4325 else if (skip_past_char (&p, ':') == SUCCESS)
4327 /* FIXME: '@' should be used here, but it's filtered out by generic
4328 code before we get to see it here. This may be subject to
4331 my_get_expression (&exp, &p, GE_NO_PREFIX);
4332 if (exp.X_op != O_constant)
4334 inst.error = _("alignment must be constant");
4337 inst.operands[i].imm = exp.X_add_number << 8;
4338 inst.operands[i].immisalign = 1;
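/* The alignment is kept in the high bits of .imm so that a post-index
   register, if present, can later be OR'd into the low-order bits (see the
   immisalign handling further down).  */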
4339 /* Alignments are not pre-indexes. */
4340 inst.operands[i].preind = 0;
4344 if (inst.operands[i].negative)
4346 inst.operands[i].negative = 0;
4349 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4354 if (skip_past_char (&p, ']') == FAIL)
4356 inst.error = _("']' expected");
4360 if (skip_past_char (&p, '!') == SUCCESS)
4361 inst.operands[i].writeback = 1;
4363 else if (skip_past_comma (&p) == SUCCESS)
4365 if (skip_past_char (&p, '{') == SUCCESS)
4367 /* [Rn], {expr} - unindexed, with option */
4368 if (parse_immediate (&p, &inst.operands[i].imm,
4369 0, 255, TRUE) == FAIL)
4372 if (skip_past_char (&p, '}') == FAIL)
4374 inst.error = _("'}' expected at end of 'option' field");
4377 if (inst.operands[i].preind)
4379 inst.error = _("cannot combine index with option");
4387 inst.operands[i].postind = 1;
4388 inst.operands[i].writeback = 1;
4390 if (inst.operands[i].preind)
4392 inst.error = _("cannot combine pre- and post-indexing");
4397 else if (*p == '-') p++, inst.operands[i].negative = 1;
4399 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4401 /* We might be using the immediate for alignment already. If we
4402 are, OR the register number into the low-order bits. */
4403 if (inst.operands[i].immisalign)
4404 inst.operands[i].imm |= reg;
4406 inst.operands[i].imm = reg;
4407 inst.operands[i].immisreg = 1;
4409 if (skip_past_comma (&p) == SUCCESS)
4410 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4415 if (inst.operands[i].negative)
4417 inst.operands[i].negative = 0;
4420 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4426 /* If at this point neither .preind nor .postind is set, we have a
4427 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4428 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4430 inst.operands[i].preind = 1;
4431 inst.reloc.exp.X_op = O_constant;
4432 inst.reloc.exp.X_add_number = 0;
4438 /* Miscellaneous. */
4440 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4441 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4443 parse_psr (char **str)
4446 unsigned long psr_field;
4447 const struct asm_psr *psr;
4450 /* CPSRs and SPSRs can now be lowercase. This is just a convenience
4451 feature for ease of use and backwards compatibility. */
4453 if (strncasecmp (p, "SPSR", 4) == 0)
4454 psr_field = SPSR_BIT;
4455 else if (strncasecmp (p, "CPSR", 4) == 0)
4462 while (ISALNUM (*p) || *p == '_');
4464 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4475 /* A suffix follows. */
4481 while (ISALNUM (*p) || *p == '_');
4483 psr = hash_find_n (arm_psr_hsh, start, p - start);
4487 psr_field |= psr->field;
4492 goto error; /* Garbage after "[CS]PSR". */
4494 psr_field |= (PSR_c | PSR_f);
4500 inst.error = _("flag for {c}psr instruction expected");
4504 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4505 value suitable for splatting into the AIF field of the instruction. */
4508 parse_cps_flags (char **str)
4517 case '\0': case ',':
4520 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4521 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4522 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4525 inst.error = _("unrecognized CPS flag");
4530 if (saw_a_flag == 0)
4532 inst.error = _("missing CPS flags");
4540 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4541 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4544 parse_endian_specifier (char **str)
4549 if (strncasecmp (s, "BE", 2) == 0)
4551 else if (strncasecmp (s, "LE", 2) == 0)
4555 inst.error = _("valid endian specifiers are be or le");
4559 if (ISALNUM (s[2]) || s[2] == '_')
4561 inst.error = _("valid endian specifiers are be or le");
4566 return little_endian;
4569 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4570 value suitable for poking into the rotate field of an sxt or sxta
4571 instruction, or FAIL on error. */
4574 parse_ror (char **str)
4579 if (strncasecmp (s, "ROR", 3) == 0)
4583 inst.error = _("missing rotation field after comma");
4587 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
4592 case 0: *str = s; return 0x0;
4593 case 8: *str = s; return 0x1;
4594 case 16: *str = s; return 0x2;
4595 case 24: *str = s; return 0x3;
4598 inst.error = _("rotation can only be 0, 8, 16, or 24");
4603 /* Parse a conditional code (from conds[] below). The value returned is in the
4604 range 0 .. 14, or FAIL. */
4606 parse_cond (char **str)
4609 const struct asm_cond *c;
4612 while (ISALPHA (*q))
4615 c = hash_find_n (arm_cond_hsh, p, q - p);
4618 inst.error = _("condition required");
4626 /* Parse an option for a barrier instruction. Returns the encoding for the
4629 parse_barrier (char **str)
4632 const struct asm_barrier_opt *o;
4635 while (ISALPHA (*q))
4638 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
4646 /* Parse the operands of a table branch instruction. Similar to a memory
4649 parse_tb (char **str)
4654 if (skip_past_char (&p, '[') == FAIL)
4656 inst.error = _("'[' expected");
4660 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4662 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4665 inst.operands[0].reg = reg;
4667 if (skip_past_comma (&p) == FAIL)
4669 inst.error = _("',' expected");
4673 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4675 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4678 inst.operands[0].imm = reg;
4680 if (skip_past_comma (&p) == SUCCESS)
4682 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
4684 if (inst.reloc.exp.X_add_number != 1)
4686 inst.error = _("invalid shift");
4689 inst.operands[0].shifted = 1;
4692 if (skip_past_char (&p, ']') == FAIL)
4694 inst.error = _("']' expected");
4701 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4702 information on the types the operands can take and how they are encoded.
4703 Note particularly the abuse of ".regisimm" to signify a Neon register.
4704 Up to three operands may be read; this function handles setting the
4705 ".present" field for each operand itself.
4706 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4707 else returns FAIL. */
4710 parse_neon_mov (char **str, int *which_operand)
4712 int i = *which_operand, val;
4713 enum arm_reg_type rtype;
4715 struct neon_type_el optype;
4717 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4719 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4720 inst.operands[i].reg = val;
4721 inst.operands[i].isscalar = 1;
4722 inst.operands[i].vectype = optype;
4723 inst.operands[i++].present = 1;
4725 if (skip_past_comma (&ptr) == FAIL)
4728 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4731 inst.operands[i].reg = val;
4732 inst.operands[i].isreg = 1;
4733 inst.operands[i].present = 1;
4735 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4738 /* Cases 0, 1, 2, 3, 5 (D only). */
4739 if (skip_past_comma (&ptr) == FAIL)
4742 inst.operands[i].reg = val;
4743 inst.operands[i].isreg = 1;
4744 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4745 inst.operands[i].vectype = optype;
4746 inst.operands[i++].present = 1;
4748 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4750 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4751 inst.operands[i-1].regisimm = 1;
4752 inst.operands[i].reg = val;
4753 inst.operands[i].isreg = 1;
4754 inst.operands[i++].present = 1;
4756 if (rtype == REG_TYPE_NQ)
4758 first_error (_("can't use Neon quad register here"));
4761 if (skip_past_comma (&ptr) == FAIL)
4763 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4765 inst.operands[i].reg = val;
4766 inst.operands[i].isreg = 1;
4767 inst.operands[i].present = 1;
4769 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
4771 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4772 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> */
4773 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4776 else if (parse_big_immediate (&ptr, i) == SUCCESS)
4778 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4779 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4780 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4783 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4786 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4787 Case 1: VMOV<c><q> <Dd>, <Dm> */
4788 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4791 inst.operands[i].reg = val;
4792 inst.operands[i].isreg = 1;
4793 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4794 inst.operands[i].vectype = optype;
4795 inst.operands[i].present = 1;
4799 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4803 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4806 inst.operands[i].reg = val;
4807 inst.operands[i].isreg = 1;
4808 inst.operands[i++].present = 1;
4810 if (skip_past_comma (&ptr) == FAIL)
4813 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4815 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4816 inst.operands[i].reg = val;
4817 inst.operands[i].isscalar = 1;
4818 inst.operands[i].present = 1;
4819 inst.operands[i].vectype = optype;
4821 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4823 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4824 inst.operands[i].reg = val;
4825 inst.operands[i].isreg = 1;
4826 inst.operands[i++].present = 1;
4828 if (skip_past_comma (&ptr) == FAIL)
4831 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFD, NULL, &optype))
4834 first_error (_(reg_expected_msgs[REG_TYPE_VFD]));
4838 inst.operands[i].reg = val;
4839 inst.operands[i].isreg = 1;
4840 inst.operands[i].regisimm = 1;
4841 inst.operands[i].vectype = optype;
4842 inst.operands[i].present = 1;
4847 first_error (_("parse error"));
4851 /* Successfully parsed the operands. Update args. */
4857 first_error (_("expected comma"));
4861 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
4865 first_error (_("instruction cannot be conditionalized"));
4869 /* Matcher codes for parse_operands. */
4870 enum operand_parse_code
4872 OP_stop, /* end of line */
4874 OP_RR, /* ARM register */
4875 OP_RRnpc, /* ARM register, not r15 */
4876 OP_RRnpcb, /* ARM register, not r15, in square brackets */
4877 OP_RRw, /* ARM register, not r15, optional trailing ! */
4878 OP_RCP, /* Coprocessor number */
4879 OP_RCN, /* Coprocessor register */
4880 OP_RF, /* FPA register */
4881 OP_RVS, /* VFP single precision register */
4882 OP_RVD, /* VFP double precision register (0..15) */
4883 OP_RND, /* Neon double precision register (0..31) */
4884 OP_RNQ, /* Neon quad precision register */
4885 OP_RNDQ, /* Neon double or quad precision register */
4886 OP_RNSC, /* Neon scalar D[X] */
4887 OP_RVC, /* VFP control register */
4888 OP_RMF, /* Maverick F register */
4889 OP_RMD, /* Maverick D register */
4890 OP_RMFX, /* Maverick FX register */
4891 OP_RMDX, /* Maverick DX register */
4892 OP_RMAX, /* Maverick AX register */
4893 OP_RMDS, /* Maverick DSPSC register */
4894 OP_RIWR, /* iWMMXt wR register */
4895 OP_RIWC, /* iWMMXt wC register */
4896 OP_RIWG, /* iWMMXt wCG register */
4897 OP_RXA, /* XScale accumulator register */
4899 OP_REGLST, /* ARM register list */
4900 OP_VRSLST, /* VFP single-precision register list */
4901 OP_VRDLST, /* VFP double-precision register list */
4902 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
4903 OP_NSTRLST, /* Neon element/structure list */
4905 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4906 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
4907 OP_RR_RNSC, /* ARM reg or Neon scalar. */
4908 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
4909 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
4910 OP_VMOV, /* Neon VMOV operands. */
4911 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
4912 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
4914 OP_I0, /* immediate zero */
4915 OP_I7, /* immediate value 0 .. 7 */
4916 OP_I15, /* 0 .. 15 */
4917 OP_I16, /* 1 .. 16 */
4918 OP_I16z, /* 0 .. 16 */
4919 OP_I31, /* 0 .. 31 */
4920 OP_I31w, /* 0 .. 31, optional trailing ! */
4921 OP_I32, /* 1 .. 32 */
4922 OP_I32z, /* 0 .. 32 */
4923 OP_I63, /* 0 .. 63 */
4924 OP_I63s, /* -64 .. 63 */
4925 OP_I64, /* 1 .. 64 */
4926 OP_I64z, /* 0 .. 64 */
4927 OP_I255, /* 0 .. 255 */
4928 OP_Iffff, /* 0 .. 65535 */
4930 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
4931 OP_I7b, /* 0 .. 7 */
4932 OP_I15b, /* 0 .. 15 */
4933 OP_I31b, /* 0 .. 31 */
4935 OP_SH, /* shifter operand */
4936 OP_ADDR, /* Memory address expression (any mode) */
4937 OP_EXP, /* arbitrary expression */
4938 OP_EXPi, /* same, with optional immediate prefix */
4939 OP_EXPr, /* same, with optional relocation suffix */
4941 OP_CPSF, /* CPS flags */
4942 OP_ENDI, /* Endianness specifier */
4943 OP_PSR, /* CPSR/SPSR mask for msr */
4944 OP_COND, /* conditional code */
4945 OP_TB, /* Table branch. */
4947 OP_RRnpc_I0, /* ARM register or literal 0 */
4948 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
4949 OP_RR_EXi, /* ARM register or expression with imm prefix */
4950 OP_RF_IF, /* FPA register or immediate */
4951 OP_RIWR_RIWC, /* iWMMXt R or C reg */
4953 /* Optional operands. */
4954 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
4955 OP_oI31b, /* 0 .. 31 */
4956 OP_oI32b, /* 1 .. 32 */
4957 OP_oIffffb, /* 0 .. 65535 */
4958 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
4960 OP_oRR, /* ARM register */
4961 OP_oRRnpc, /* ARM register, not the PC */
4962 OP_oRND, /* Optional Neon double precision register */
4963 OP_oRNQ, /* Optional Neon quad precision register */
4964 OP_oRNDQ, /* Optional Neon double or quad precision register */
4965 OP_oSHll, /* LSL immediate */
4966 OP_oSHar, /* ASR immediate */
4967 OP_oSHllar, /* LSL or ASR immediate */
4968 OP_oROR, /* ROR 0/8/16/24 */
4969 OP_oBARRIER, /* Option argument for a barrier instruction. */
4971 OP_FIRST_OPTIONAL = OP_oI7b
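/* Everything from OP_FIRST_OPTIONAL onwards is treated as optional by
   parse_operands, which records a backtrack point when it meets one.  */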
4974 /* Generic instruction operand parser. This does no encoding and no
4975 semantic validation; it merely squirrels values away in the inst
4976 structure. Returns SUCCESS or FAIL depending on whether the
4977 specified grammar matched. */
4979 parse_operands (char *str, const unsigned char *pattern)
4981 unsigned const char *upat = pattern;
4982 char *backtrack_pos = 0;
4983 const char *backtrack_error = 0;
4984 int i, val, backtrack_index = 0;
4985 enum arm_reg_type rtype;
4987 #define po_char_or_fail(chr) do { \
4988 if (skip_past_char (&str, chr) == FAIL) \
4992 #define po_reg_or_fail(regtype) do { \
4993 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4994 &inst.operands[i].vectype); \
4997 first_error (_(reg_expected_msgs[regtype])); \
5000 inst.operands[i].reg = val; \
5001 inst.operands[i].isreg = 1; \
5002 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5005 #define po_reg_or_goto(regtype, label) do { \
5006 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5007 &inst.operands[i].vectype); \
5011 inst.operands[i].reg = val; \
5012 inst.operands[i].isreg = 1; \
5013 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5016 #define po_imm_or_fail(min, max, popt) do { \
5017 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5019 inst.operands[i].imm = val; \
5022 #define po_scalar_or_goto(elsz, label) do { \
5023 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5026 inst.operands[i].reg = val; \
5027 inst.operands[i].isscalar = 1; \
5030 #define po_misc_or_fail(expr) do { \
5035 skip_whitespace (str);
5037 for (i = 0; upat[i] != OP_stop; i++)
5039 if (upat[i] >= OP_FIRST_OPTIONAL)
5041 /* Remember where we are in case we need to backtrack. */
5042 assert (!backtrack_pos);
5043 backtrack_pos = str;
5044 backtrack_error = inst.error;
5045 backtrack_index = i;
5049 po_char_or_fail (',');
5057 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5058 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5059 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5060 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5061 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5062 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5064 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5065 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5066 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5067 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5068 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5069 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5070 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5071 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5072 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5073 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5074 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5075 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5077 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5079 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5081 /* Neon scalar. Using an element size of 8 means that some invalid
5082 scalars are accepted here, so deal with those in later code. */
5083 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5085 /* WARNING: We can expand to two operands here. This has the potential
5086 to totally confuse the backtracking mechanism! It will be OK at
5087 least as long as we don't try to use optional args as well,
5091 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5093 skip_past_comma (&str);
5094 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5097 /* Optional register operand was omitted. Unfortunately, it's in
5098 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5099 here (this is a bit grotty). */
5100 inst.operands[i] = inst.operands[i-1];
5101 inst.operands[i-1].present = 0;
5104 /* Immediate gets verified properly later, so accept any now. */
5105 po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
5111 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5114 po_imm_or_fail (0, 0, TRUE);
5120 po_scalar_or_goto (8, try_rr);
5123 po_reg_or_fail (REG_TYPE_RN);
5129 po_scalar_or_goto (8, try_ndq);
5132 po_reg_or_fail (REG_TYPE_NDQ);
5138 po_scalar_or_goto (8, try_vfd);
5141 po_reg_or_fail (REG_TYPE_VFD);
5146 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5147 not careful then bad things might happen. */
5148 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5153 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5156 /* There's a possibility of getting a 64-bit immediate here, so
5157 we need special handling. */
5158 if (parse_big_immediate (&str, i) == FAIL)
5160 inst.error = _("immediate value is out of range");
5168 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5171 po_imm_or_fail (0, 63, TRUE);
5176 po_char_or_fail ('[');
5177 po_reg_or_fail (REG_TYPE_RN);
5178 po_char_or_fail (']');
5182 po_reg_or_fail (REG_TYPE_RN);
5183 if (skip_past_char (&str, '!') == SUCCESS)
5184 inst.operands[i].writeback = 1;
5188 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5189 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5190 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5191 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5192 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5193 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5194 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5195 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5196 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5197 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5198 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5199 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5200 case OP_Iffff: po_imm_or_fail ( 0, 0xffff, FALSE); break;
5202 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5204 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5205 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5207 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5208 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5209 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5211 /* Immediate variants */
5213 po_char_or_fail ('{');
5214 po_imm_or_fail (0, 255, TRUE);
5215 po_char_or_fail ('}');
5219 /* The expression parser chokes on a trailing !, so we have
5220 to find it first and zap it. */
5223 while (*s && *s != ',')
5228 inst.operands[i].writeback = 1;
5230 po_imm_or_fail (0, 31, TRUE);
5238 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5243 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5248 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5250 if (inst.reloc.exp.X_op == O_symbol)
5252 val = parse_reloc (&str);
5255 inst.error = _("unrecognized relocation suffix");
5258 else if (val != BFD_RELOC_UNUSED)
5260 inst.operands[i].imm = val;
5261 inst.operands[i].hasreloc = 1;
5266 /* Register or expression */
5267 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5268 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5270 /* Register or immediate */
5271 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5272 I0: po_imm_or_fail (0, 0, FALSE); break;
5274 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5276 if (!is_immediate_prefix (*str))
5279 val = parse_fpa_immediate (&str);
5282 /* FPA immediates are encoded as registers 8-15.
5283 parse_fpa_immediate has already applied the offset. */
5284 inst.operands[i].reg = val;
5285 inst.operands[i].isreg = 1;
5288 /* Two kinds of register */
5291 struct reg_entry *rege = arm_reg_parse_multi (&str);
5292 if (rege->type != REG_TYPE_MMXWR
5293 && rege->type != REG_TYPE_MMXWC
5294 && rege->type != REG_TYPE_MMXWCG)
5296 inst.error = _("iWMMXt data or control register expected");
5299 inst.operands[i].reg = rege->number;
5300 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5305 case OP_CPSF: val = parse_cps_flags (&str); break;
5306 case OP_ENDI: val = parse_endian_specifier (&str); break;
5307 case OP_oROR: val = parse_ror (&str); break;
5308 case OP_PSR: val = parse_psr (&str); break;
5309 case OP_COND: val = parse_cond (&str); break;
5310 case OP_oBARRIER:val = parse_barrier (&str); break;
5313 po_misc_or_fail (parse_tb (&str));
5316 /* Register lists */
5318 val = parse_reg_list (&str);
5321 inst.operands[1].writeback = 1;
5327 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5331 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5335 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5340 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5341 &inst.operands[i].vectype);
5344 /* Addressing modes */
5346 po_misc_or_fail (parse_address (&str, i));
5350 po_misc_or_fail (parse_shifter_operand (&str, i));
5354 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5358 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5362 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5366 as_fatal ("unhandled operand code %d", upat[i]);
5369 /* Various value-based sanity checks and shared operations. We
5370 do not signal immediate failures for the register constraints;
5371 this allows a syntax error to take precedence. */
5379 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
5380 inst.error = BAD_PC;
5396 inst.operands[i].imm = val;
5403 /* If we get here, this operand was successfully parsed. */
5404 inst.operands[i].present = 1;
5408 inst.error = BAD_ARGS;
5413 /* The parse routine should already have set inst.error, but set a
5414 default here just in case. */
5416 inst.error = _("syntax error");
5420 /* Do not backtrack over a trailing optional argument that
5421 absorbed some text. We will only fail again, with the
5422 'garbage following instruction' error message, which is
5423 probably less helpful than the current one. */
5424 if (backtrack_index == i && backtrack_pos != str
5425 && upat[i+1] == OP_stop)
5428 inst.error = _("syntax error");
5432 /* Try again, skipping the optional argument at backtrack_pos. */
5433 str = backtrack_pos;
5434 inst.error = backtrack_error;
5435 inst.operands[backtrack_index].present = 0;
5436 i = backtrack_index;
5440 /* Check that we have parsed all the arguments. */
5441 if (*str != '\0' && !inst.error)
5442 inst.error = _("garbage following instruction");
5444 return inst.error ? FAIL : SUCCESS;
5447 #undef po_char_or_fail
5448 #undef po_reg_or_fail
5449 #undef po_reg_or_goto
5450 #undef po_imm_or_fail
5451 #undef po_scalar_or_goto
5453 /* Shorthand macro for instruction encoding functions issuing errors. */
5454 #define constraint(expr, err) do { \
5462 /* Functions for operand encoding. ARM, then Thumb. */
5464 #define rotate_left(v, n) (v << n | v >> (32 - n))
5466 /* If VAL can be encoded in the immediate field of an ARM instruction,
5467 return the encoded form. Otherwise, return FAIL. */
5470 encode_arm_immediate (unsigned int val)
5474 for (i = 0; i < 32; i += 2)
5475 if ((a = rotate_left (val, i)) <= 0xff)
5476 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
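/* Worked examples (illustrative, not in the original source): 0xff needs no
   rotation and encodes as 0x0ff; 0xff0 is 0xff rotated right by 28 bits, so
   it packs as (28 / 2) << 8 | 0xff = 0xeff; 0xff000000 is 0xff rotated right
   by 8 bits and packs as 0x4ff; 0x101 spans nine bits, fits no 8-bit window
   under any even rotation, and therefore yields FAIL.  */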
5481 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5482 return the encoded form. Otherwise, return FAIL. */
5484 encode_thumb32_immediate (unsigned int val)
5491 for (i = 1; i <= 24; i++)
5494 if ((val & ~(0xff << i)) == 0)
5495 return ((val >> i) & 0x7f) | ((32 - i) << 7);
5499 if (val == ((a << 16) | a))
5501 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
5505 if (val == ((a << 16) | a))
5506 return 0x200 | (a >> 8);
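/* Worked examples (illustrative): 0x00ab0000 has all of its set bits within
   one byte starting at bit 16, so the loop above returns
   ((0xab0000 >> 16) & 0x7f) | ((32 - 16) << 7) = 0x82b; the replicated
   half-word pattern 0x56005600 matches the test just above and encodes as
   0x200 | 0x56 = 0x256.  */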
5510 /* Encode a VFP SP or DP register number into inst.instruction. */
5513 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
5515 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
5518 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
5521 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
5524 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
5529 first_error (_("D register out of range for selected VFP version"));
5537 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
5541 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
5545 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
5549 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
5553 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
5557 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
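/* Worked example (illustrative): s17 in the Sd position places
   s17 >> 1 = 8 in bits 15:12 and s17 & 1 = 1 in bit 22, while d17 in the
   Dd position places d17 & 15 = 1 in bits 15:12 and d17 >> 4 = 1 in
   bit 22.  */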
5565 /* Encode a <shift> in an ARM-format instruction. The immediate,
5566 if any, is handled by md_apply_fix. */
5568 encode_arm_shift (int i)
5570 if (inst.operands[i].shift_kind == SHIFT_RRX)
5571 inst.instruction |= SHIFT_ROR << 5;
5574 inst.instruction |= inst.operands[i].shift_kind << 5;
5575 if (inst.operands[i].immisreg)
5577 inst.instruction |= SHIFT_BY_REG;
5578 inst.instruction |= inst.operands[i].imm << 8;
5581 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5586 encode_arm_shifter_operand (int i)
5588 if (inst.operands[i].isreg)
5590 inst.instruction |= inst.operands[i].reg;
5591 encode_arm_shift (i);
5594 inst.instruction |= INST_IMMEDIATE;
5597 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5599 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
5601 assert (inst.operands[i].isreg);
5602 inst.instruction |= inst.operands[i].reg << 16;
5604 if (inst.operands[i].preind)
5608 inst.error = _("instruction does not accept preindexed addressing");
5611 inst.instruction |= PRE_INDEX;
5612 if (inst.operands[i].writeback)
5613 inst.instruction |= WRITE_BACK;
5616 else if (inst.operands[i].postind)
5618 assert (inst.operands[i].writeback);
5620 inst.instruction |= WRITE_BACK;
5622 else /* unindexed - only for coprocessor */
5624 inst.error = _("instruction does not accept unindexed addressing");
5628 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
5629 && (((inst.instruction & 0x000f0000) >> 16)
5630 == ((inst.instruction & 0x0000f000) >> 12)))
5631 as_warn ((inst.instruction & LOAD_BIT)
5632 ? _("destination register same as write-back base")
5633 : _("source register same as write-back base"));
5636 /* inst.operands[i] was set up by parse_address. Encode it into an
5637 ARM-format mode 2 load or store instruction. If is_t is true,
5638 reject forms that cannot be used with a T instruction (i.e. not
5641 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
5643 encode_arm_addr_mode_common (i, is_t);
5645 if (inst.operands[i].immisreg)
5647 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
5648 inst.instruction |= inst.operands[i].imm;
5649 if (!inst.operands[i].negative)
5650 inst.instruction |= INDEX_UP;
5651 if (inst.operands[i].shifted)
5653 if (inst.operands[i].shift_kind == SHIFT_RRX)
5654 inst.instruction |= SHIFT_ROR << 5;
5657 inst.instruction |= inst.operands[i].shift_kind << 5;
5658 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5662 else /* immediate offset in inst.reloc */
5664 if (inst.reloc.type == BFD_RELOC_UNUSED)
5665 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
5669 /* inst.operands[i] was set up by parse_address. Encode it into an
5670 ARM-format mode 3 load or store instruction. Reject forms that
5671 cannot be used with such instructions. If is_t is true, reject
5672 forms that cannot be used with a T instruction (i.e. not
5675 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
5677 if (inst.operands[i].immisreg && inst.operands[i].shifted)
5679 inst.error = _("instruction does not accept scaled register index");
5683 encode_arm_addr_mode_common (i, is_t);
5685 if (inst.operands[i].immisreg)
5687 inst.instruction |= inst.operands[i].imm;
5688 if (!inst.operands[i].negative)
5689 inst.instruction |= INDEX_UP;
5691 else /* immediate offset in inst.reloc */
5693 inst.instruction |= HWOFFSET_IMM;
5694 if (inst.reloc.type == BFD_RELOC_UNUSED)
5695 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
5699 /* inst.operands[i] was set up by parse_address. Encode it into an
5700 ARM-format instruction. Reject all forms which cannot be encoded
5701 into a coprocessor load/store instruction. If wb_ok is false,
5702 reject use of writeback; if unind_ok is false, reject use of
5703 unindexed addressing. If reloc_override is not 0, use it instead
5704 of BFD_ARM_CP_OFF_IMM. */
5707 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
5709 inst.instruction |= inst.operands[i].reg << 16;
5711 assert (!(inst.operands[i].preind && inst.operands[i].postind));
5713 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
5715 assert (!inst.operands[i].writeback);
5718 inst.error = _("instruction does not support unindexed addressing");
5721 inst.instruction |= inst.operands[i].imm;
5722 inst.instruction |= INDEX_UP;
5726 if (inst.operands[i].preind)
5727 inst.instruction |= PRE_INDEX;
5729 if (inst.operands[i].writeback)
5731 if (inst.operands[i].reg == REG_PC)
5733 inst.error = _("pc may not be used with write-back");
5738 inst.error = _("instruction does not support writeback");
5741 inst.instruction |= WRITE_BACK;
5745 inst.reloc.type = reloc_override;
5746 else if (thumb_mode)
5747 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
5749 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
5753 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5754 Determine whether it can be performed with a move instruction; if
5755 it can, convert inst.instruction to that move instruction and
5756 return 1; if it can't, convert inst.instruction to a literal-pool
5757 load and return 0. If this is not a valid thing to do in the
5758 current context, set inst.error and return 1.
5760 inst.operands[i] describes the destination register. */
5763 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
5768 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
5772 if ((inst.instruction & tbit) == 0)
5774 inst.error = _("invalid pseudo operation");
5777 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
5779 inst.error = _("constant expression expected");
5782 if (inst.reloc.exp.X_op == O_constant)
5786 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
5788 /* This can be done with a mov(1) instruction. */
5789 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
5790 inst.instruction |= inst.reloc.exp.X_add_number;
5796 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
5799 /* This can be done with a mov instruction. */
5800 inst.instruction &= LITERAL_MASK;
5801 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
5802 inst.instruction |= value & 0xfff;
5806 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
5809 /* This can be done with a mvn instruction. */
5810 inst.instruction &= LITERAL_MASK;
5811 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
5812 inst.instruction |= value & 0xfff;
5818 if (add_to_lit_pool () == FAIL)
5820 inst.error = _("literal pool insertion failed");
5823 inst.operands[1].reg = REG_PC;
5824 inst.operands[1].isreg = 1;
5825 inst.operands[1].preind = 1;
5826 inst.reloc.pc_rel = 1;
5827 inst.reloc.type = (thumb_p
5828 ? BFD_RELOC_ARM_THUMB_OFFSET
5830 ? BFD_RELOC_ARM_HWLITERAL
5831 : BFD_RELOC_ARM_LITERAL));
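/* Illustrative summary (not in the original source): "ldr r0, =0x2f0"
   becomes "mov r0, #0x2f0" because the constant has a valid ARM immediate
   encoding; "ldr r0, =0xffffff00" becomes "mvn r0, #0xff"; anything else,
   e.g. "ldr r0, =some_symbol", is placed in the literal pool and loaded
   with a pc-relative ldr.  */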
5835 /* Functions for instruction encoding, sorted by subarchitecture.
5836 First some generics; their names are taken from the conventional
5837 bit positions for register arguments in ARM format instructions. */
5847 inst.instruction |= inst.operands[0].reg << 12;
5853 inst.instruction |= inst.operands[0].reg << 12;
5854 inst.instruction |= inst.operands[1].reg;
5860 inst.instruction |= inst.operands[0].reg << 12;
5861 inst.instruction |= inst.operands[1].reg << 16;
5867 inst.instruction |= inst.operands[0].reg << 16;
5868 inst.instruction |= inst.operands[1].reg << 12;
5874 unsigned Rn = inst.operands[2].reg;
5875 /* Enforce restrictions on SWP instruction. */
5876 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
5877 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
5878 _("Rn must not overlap other operands"));
5879 inst.instruction |= inst.operands[0].reg << 12;
5880 inst.instruction |= inst.operands[1].reg;
5881 inst.instruction |= Rn << 16;
5887 inst.instruction |= inst.operands[0].reg << 12;
5888 inst.instruction |= inst.operands[1].reg << 16;
5889 inst.instruction |= inst.operands[2].reg;
5895 inst.instruction |= inst.operands[0].reg;
5896 inst.instruction |= inst.operands[1].reg << 12;
5897 inst.instruction |= inst.operands[2].reg << 16;
5903 inst.instruction |= inst.operands[0].imm;
5909 inst.instruction |= inst.operands[0].reg << 12;
5910 encode_arm_cp_address (1, TRUE, TRUE, 0);
5913 /* ARM instructions, in alphabetical order by function name (except
5914 that wrapper functions appear immediately after the function they
5917 /* This is a pseudo-op of the form "adr rd, label" to be converted
5918 into a relative address of the form "add rd, pc, #label-.-8". */
5923 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5925 /* Frag hacking will turn this into a sub instruction if the offset turns
5926 out to be negative. */
5927 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5928 inst.reloc.pc_rel = 1;
5929 inst.reloc.exp.X_add_number -= 8;
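/* Worked example (illustrative): for "adr r0, label" assembled at address
   0x1000 with label at 0x1010, the fixup above resolves to
   "add r0, pc, #8", since the pc reads as the instruction address plus 8
   in ARM state (0x1010 - 0x1000 - 8 = 8).  */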
5932 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5933 into a relative address of the form:
5934 add rd, pc, #low(label-.-8)"
5935 add rd, rd, #high(label-.-8)" */
5940 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5942 /* Frag hacking will turn this into a sub instruction if the offset turns
5943 out to be negative. */
5944 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
5945 inst.reloc.pc_rel = 1;
5946 inst.size = INSN_SIZE * 2;
5947 inst.reloc.exp.X_add_number -= 8;
5953 if (!inst.operands[1].present)
5954 inst.operands[1].reg = inst.operands[0].reg;
5955 inst.instruction |= inst.operands[0].reg << 12;
5956 inst.instruction |= inst.operands[1].reg << 16;
5957 encode_arm_shifter_operand (2);
5963 if (inst.operands[0].present)
5965 constraint ((inst.instruction & 0xf0) != 0x40
5966 && inst.operands[0].imm != 0xf,
5967 "bad barrier type");
5968 inst.instruction |= inst.operands[0].imm;
5971 inst.instruction |= 0xf;
5977 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
5978 constraint (msb > 32, _("bit-field extends past end of register"));
5979 /* The instruction encoding stores the LSB and MSB,
5980 not the LSB and width. */
5981 inst.instruction |= inst.operands[0].reg << 12;
5982 inst.instruction |= inst.operands[1].imm << 7;
5983 inst.instruction |= (msb - 1) << 16;
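/* Worked example (illustrative): "bfc r0, #4, #8" has lsb = 4 and
   width = 8, so bits 11:7 receive 4 and bits 20:16 receive the msb index
   4 + 8 - 1 = 11.  */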
5991 /* #0 in second position is alternative syntax for bfc, which is
5992 the same instruction but with REG_PC in the Rm field. */
5993 if (!inst.operands[1].isreg)
5994 inst.operands[1].reg = REG_PC;
5996 msb = inst.operands[2].imm + inst.operands[3].imm;
5997 constraint (msb > 32, _("bit-field extends past end of register"));
5998 /* The instruction encoding stores the LSB and MSB,
5999 not the LSB and width. */
6000 inst.instruction |= inst.operands[0].reg << 12;
6001 inst.instruction |= inst.operands[1].reg;
6002 inst.instruction |= inst.operands[2].imm << 7;
6003 inst.instruction |= (msb - 1) << 16;
6009 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6010 _("bit-field extends past end of register"));
6011 inst.instruction |= inst.operands[0].reg << 12;
6012 inst.instruction |= inst.operands[1].reg;
6013 inst.instruction |= inst.operands[2].imm << 7;
6014 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6017 /* ARM V5 breakpoint instruction (argument parse)
6018 BKPT <16 bit unsigned immediate>
6019 Instruction is not conditional.
6020 The bit pattern given in insns[] has the COND_ALWAYS condition,
6021 and it is an error if the caller tried to override that. */
6026 /* Top 12 of 16 bits to bits 19:8. */
6027 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6029 /* Bottom 4 of 16 bits to bits 3:0. */
6030 inst.instruction |= inst.operands[0].imm & 0xf;
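/* Worked example (illustrative): "bkpt 0xabcd" places 0xabc in bits 19:8
   and 0xd in bits 3:0, giving the immediate split required by the
   encoding.  */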
6034 encode_branch (int default_reloc)
6036 if (inst.operands[0].hasreloc)
6038 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6039 _("the only suffix valid here is '(plt)'"));
6040 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6044 inst.reloc.type = default_reloc;
6046 inst.reloc.pc_rel = 1;
6053 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6054 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6057 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6064 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6066 if (inst.cond == COND_ALWAYS)
6067 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6069 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6073 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6076 /* ARM V5 branch-link-exchange instruction (argument parse)
6077 BLX <target_addr> i.e. BLX(1)
6078 BLX{<condition>} <Rm> i.e. BLX(2)
6079 Unfortunately, there are two different opcodes for this mnemonic.
6080 So, the insns[].value is not used, and the code here zaps values
6081 into inst.instruction.
6082 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6087 if (inst.operands[0].isreg)
6089 /* Arg is a register; the opcode provided by insns[] is correct.
6090 It is not illegal to do "blx pc", just useless. */
6091 if (inst.operands[0].reg == REG_PC)
6092 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6094 inst.instruction |= inst.operands[0].reg;
6098 /* Arg is an address; this instruction cannot be executed
6099 conditionally, and the opcode must be adjusted. */
6100 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6101 inst.instruction = 0xfa000000;
6103 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6104 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6107 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6114 if (inst.operands[0].reg == REG_PC)
6115 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6117 inst.instruction |= inst.operands[0].reg;
6121 /* ARM v5TEJ. Jump to Jazelle code. */
6126 if (inst.operands[0].reg == REG_PC)
6127 as_tsktsk (_("use of r15 in bxj is not really useful"));
6129 inst.instruction |= inst.operands[0].reg;
6132 /* Co-processor data operation:
6133 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6134 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6138 inst.instruction |= inst.operands[0].reg << 8;
6139 inst.instruction |= inst.operands[1].imm << 20;
6140 inst.instruction |= inst.operands[2].reg << 12;
6141 inst.instruction |= inst.operands[3].reg << 16;
6142 inst.instruction |= inst.operands[4].reg;
6143 inst.instruction |= inst.operands[5].imm << 5;
6149 inst.instruction |= inst.operands[0].reg << 16;
6150 encode_arm_shifter_operand (1);
6153 /* Transfer between coprocessor and ARM registers.
6154 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6159 No special properties. */
6164 inst.instruction |= inst.operands[0].reg << 8;
6165 inst.instruction |= inst.operands[1].imm << 21;
6166 inst.instruction |= inst.operands[2].reg << 12;
6167 inst.instruction |= inst.operands[3].reg << 16;
6168 inst.instruction |= inst.operands[4].reg;
6169 inst.instruction |= inst.operands[5].imm << 5;
6172 /* Transfer between coprocessor register and pair of ARM registers.
6173 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6178 Two XScale instructions are special cases of these:
6180 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6181 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6183 Result unpredictable if Rd or Rn is R15. */
6188 inst.instruction |= inst.operands[0].reg << 8;
6189 inst.instruction |= inst.operands[1].imm << 4;
6190 inst.instruction |= inst.operands[2].reg << 12;
6191 inst.instruction |= inst.operands[3].reg << 16;
6192 inst.instruction |= inst.operands[4].reg;
6198 inst.instruction |= inst.operands[0].imm << 6;
6199 inst.instruction |= inst.operands[1].imm;
6205 inst.instruction |= inst.operands[0].imm;
6211 /* There is no IT instruction in ARM mode. We
6212 process it but do not generate code for it. */
6219 int base_reg = inst.operands[0].reg;
6220 int range = inst.operands[1].imm;
6222 inst.instruction |= base_reg << 16;
6223 inst.instruction |= range;
6225 if (inst.operands[1].writeback)
6226 inst.instruction |= LDM_TYPE_2_OR_3;
6228 if (inst.operands[0].writeback)
6230 inst.instruction |= WRITE_BACK;
6231 /* Check for unpredictable uses of writeback. */
6232 if (inst.instruction & LOAD_BIT)
6234 /* Not allowed in LDM type 2. */
6235 if ((inst.instruction & LDM_TYPE_2_OR_3)
6236 && ((range & (1 << REG_PC)) == 0))
6237 as_warn (_("writeback of base register is UNPREDICTABLE"));
6238 /* Only allowed if base reg not in list for other types. */
6239 else if (range & (1 << base_reg))
6240 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6244 /* Not allowed for type 2. */
6245 if (inst.instruction & LDM_TYPE_2_OR_3)
6246 as_warn (_("writeback of base register is UNPREDICTABLE"));
6247 /* Only allowed if base reg not in list, or first in list. */
6248 else if ((range & (1 << base_reg))
6249 && (range & ((1 << base_reg) - 1)))
6250 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6255 /* ARMv5TE load-consecutive (argument parse)
6264 constraint (inst.operands[0].reg % 2 != 0,
6265 _("first destination register must be even"));
6266 constraint (inst.operands[1].present
6267 && inst.operands[1].reg != inst.operands[0].reg + 1,
6268 _("can only load two consecutive registers"));
6269 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6270 constraint (!inst.operands[2].isreg, _("'[' expected"));
6272 if (!inst.operands[1].present)
6273 inst.operands[1].reg = inst.operands[0].reg + 1;
6275 if (inst.instruction & LOAD_BIT)
6277 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6278 register and the first register written; we have to diagnose
6279 overlap between the base and the second register written here. */
6281 if (inst.operands[2].reg == inst.operands[1].reg
6282 && (inst.operands[2].writeback || inst.operands[2].postind))
6283 as_warn (_("base register written back, and overlaps "
6284 "second destination register"));
6286 /* For an index-register load, the index register must not overlap the
6287 destination (even if not write-back). */
6288 else if (inst.operands[2].immisreg
6289 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6290 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6291 as_warn (_("index register overlaps destination register"));
6294 inst.instruction |= inst.operands[0].reg << 12;
6295 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6301 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6302 || inst.operands[1].postind || inst.operands[1].writeback
6303 || inst.operands[1].immisreg || inst.operands[1].shifted
6304 || inst.operands[1].negative
6305 /* This can arise if the programmer has written
6307 or if they have mistakenly used a register name as the last
6310 It is very difficult to distinguish between these two cases
6311 because "rX" might actually be a label, i.e. the register
6312 name has been occluded by a symbol of the same name. So we
6313 just generate a general 'bad addressing mode' type error
6314 message and leave it up to the programmer to discover the
6315 true cause and fix their mistake. */
6316 || (inst.operands[1].reg == REG_PC),
6319 constraint (inst.reloc.exp.X_op != O_constant
6320 || inst.reloc.exp.X_add_number != 0,
6321 _("offset must be zero in ARM encoding"));
6323 inst.instruction |= inst.operands[0].reg << 12;
6324 inst.instruction |= inst.operands[1].reg << 16;
6325 inst.reloc.type = BFD_RELOC_UNUSED;
6331 constraint (inst.operands[0].reg % 2 != 0,
6332 _("even register required"));
6333 constraint (inst.operands[1].present
6334 && inst.operands[1].reg != inst.operands[0].reg + 1,
6335 _("can only load two consecutive registers"));
6336 /* If op 1 were present and equal to PC, this function wouldn't
6337 have been called in the first place. */
6338 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6340 inst.instruction |= inst.operands[0].reg << 12;
6341 inst.instruction |= inst.operands[2].reg << 16;
6347 inst.instruction |= inst.operands[0].reg << 12;
6348 if (!inst.operands[1].isreg)
6349 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6351 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6357 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6359 if (inst.operands[1].preind)
6361 constraint (inst.reloc.exp.X_op != O_constant ||
6362 inst.reloc.exp.X_add_number != 0,
6363 _("this instruction requires a post-indexed address"));
6365 inst.operands[1].preind = 0;
6366 inst.operands[1].postind = 1;
6367 inst.operands[1].writeback = 1;
6369 inst.instruction |= inst.operands[0].reg << 12;
6370 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
6373 /* Halfword and signed-byte load/store operations. */
6378 inst.instruction |= inst.operands[0].reg << 12;
6379 if (!inst.operands[1].isreg)
6380 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
6382 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
6388 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6390 if (inst.operands[1].preind)
6392 constraint (inst.reloc.exp.X_op != O_constant ||
6393 inst.reloc.exp.X_add_number != 0,
6394 _("this instruction requires a post-indexed address"));
6396 inst.operands[1].preind = 0;
6397 inst.operands[1].postind = 1;
6398 inst.operands[1].writeback = 1;
6400 inst.instruction |= inst.operands[0].reg << 12;
6401 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
6404 /* Co-processor register load/store.
6405 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6409 inst.instruction |= inst.operands[0].reg << 8;
6410 inst.instruction |= inst.operands[1].reg << 12;
6411 encode_arm_cp_address (2, TRUE, TRUE, 0);
6417 /* This restriction does not apply to mls (nor to mla in v6, but
6418 that's hard to detect at present). */
6419 if (inst.operands[0].reg == inst.operands[1].reg
6420 && !(inst.instruction & 0x00400000))
6421 as_tsktsk (_("rd and rm should be different in mla"));
6423 inst.instruction |= inst.operands[0].reg << 16;
6424 inst.instruction |= inst.operands[1].reg;
6425 inst.instruction |= inst.operands[2].reg << 8;
6426 inst.instruction |= inst.operands[3].reg << 12;
6433 inst.instruction |= inst.operands[0].reg << 12;
6434 encode_arm_shifter_operand (1);
6437 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6441 inst.instruction |= inst.operands[0].reg << 12;
6442 /* The value is in two pieces: 0:11, 16:19. */
6443 inst.instruction |= (inst.operands[1].imm & 0x00000fff);
6444 inst.instruction |= (inst.operands[1].imm & 0x0000f000) << 4;
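/* Worked example (illustrative): "movw r0, #0xabcd" places 0xbcd in bits
   11:0 and 0xa in bits 19:16.  */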
6450 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6451 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
6453 _("'CPSR' or 'SPSR' expected"));
6454 inst.instruction |= inst.operands[0].reg << 12;
6455 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
6458 /* Two possible forms:
6459 "{C|S}PSR_<field>, Rm",
6460 "{C|S}PSR_f, #expression". */
6465 inst.instruction |= inst.operands[0].imm;
6466 if (inst.operands[1].isreg)
6467 inst.instruction |= inst.operands[1].reg;
6470 inst.instruction |= INST_IMMEDIATE;
6471 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6472 inst.reloc.pc_rel = 0;
6479 if (!inst.operands[2].present)
6480 inst.operands[2].reg = inst.operands[0].reg;
6481 inst.instruction |= inst.operands[0].reg << 16;
6482 inst.instruction |= inst.operands[1].reg;
6483 inst.instruction |= inst.operands[2].reg << 8;
6485 if (inst.operands[0].reg == inst.operands[1].reg)
6486 as_tsktsk (_("rd and rm should be different in mul"));
6489 /* Long Multiply Parser
6490 UMULL RdLo, RdHi, Rm, Rs
6491 SMULL RdLo, RdHi, Rm, Rs
6492 UMLAL RdLo, RdHi, Rm, Rs
6493 SMLAL RdLo, RdHi, Rm, Rs. */
6498 inst.instruction |= inst.operands[0].reg << 12;
6499 inst.instruction |= inst.operands[1].reg << 16;
6500 inst.instruction |= inst.operands[2].reg;
6501 inst.instruction |= inst.operands[3].reg << 8;
6503 /* rdhi, rdlo and rm must all be different. */
6504 if (inst.operands[0].reg == inst.operands[1].reg
6505 || inst.operands[0].reg == inst.operands[2].reg
6506 || inst.operands[1].reg == inst.operands[2].reg)
6507 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6513 if (inst.operands[0].present)
6515 /* Architectural NOP hints are CPSR sets with no bits selected. */
6516 inst.instruction &= 0xf0000000;
6517 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
6521 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6522 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6523 Condition defaults to COND_ALWAYS.
6524 Error if Rd, Rn or Rm are R15. */
6529 inst.instruction |= inst.operands[0].reg << 12;
6530 inst.instruction |= inst.operands[1].reg << 16;
6531 inst.instruction |= inst.operands[2].reg;
6532 if (inst.operands[3].present)
6533 encode_arm_shift (3);
6536 /* ARM V6 PKHTB (Argument Parse). */
6541 if (!inst.operands[3].present)
6543 /* If the shift specifier is omitted, turn the instruction
6544 into pkhbt rd, rm, rn. */
6545 inst.instruction &= 0xfff00010;
6546 inst.instruction |= inst.operands[0].reg << 12;
6547 inst.instruction |= inst.operands[1].reg;
6548 inst.instruction |= inst.operands[2].reg << 16;
6552 inst.instruction |= inst.operands[0].reg << 12;
6553 inst.instruction |= inst.operands[1].reg << 16;
6554 inst.instruction |= inst.operands[2].reg;
6555 encode_arm_shift (3);
6559 /* ARMv5TE: Preload-Cache
6563 Syntactically, like LDR with B=1, W=0, L=1. */
6568 constraint (!inst.operands[0].isreg,
6569 _("'[' expected after PLD mnemonic"));
6570 constraint (inst.operands[0].postind,
6571 _("post-indexed expression used in preload instruction"));
6572 constraint (inst.operands[0].writeback,
6573 _("writeback used in preload instruction"));
6574 constraint (!inst.operands[0].preind,
6575 _("unindexed addressing used in preload instruction"));
6576 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6579 /* ARMv7: PLI <addr_mode> */
6583 constraint (!inst.operands[0].isreg,
6584 _("'[' expected after PLI mnemonic"));
6585 constraint (inst.operands[0].postind,
6586 _("post-indexed expression used in preload instruction"));
6587 constraint (inst.operands[0].writeback,
6588 _("writeback used in preload instruction"));
6589 constraint (!inst.operands[0].preind,
6590 _("unindexed addressing used in preload instruction"));
6591 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6592 inst.instruction &= ~PRE_INDEX;
6598 inst.operands[1] = inst.operands[0];
6599 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
6600 inst.operands[0].isreg = 1;
6601 inst.operands[0].writeback = 1;
6602 inst.operands[0].reg = REG_SP;
6606 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6607 word at the specified address and the following word
6609 Unconditionally executed.
6610 Error if Rn is R15. */
6615 inst.instruction |= inst.operands[0].reg << 16;
6616 if (inst.operands[0].writeback)
6617 inst.instruction |= WRITE_BACK;
6620 /* ARM V6 ssat (argument parse). */
6625 inst.instruction |= inst.operands[0].reg << 12;
6626 inst.instruction |= (inst.operands[1].imm - 1) << 16;
6627 inst.instruction |= inst.operands[2].reg;
6629 if (inst.operands[3].present)
6630 encode_arm_shift (3);
6633 /* ARM V6 usat (argument parse). */
6638 inst.instruction |= inst.operands[0].reg << 12;
6639 inst.instruction |= inst.operands[1].imm << 16;
6640 inst.instruction |= inst.operands[2].reg;
6642 if (inst.operands[3].present)
6643 encode_arm_shift (3);
6646 /* ARM V6 ssat16 (argument parse). */
6651 inst.instruction |= inst.operands[0].reg << 12;
6652 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
6653 inst.instruction |= inst.operands[2].reg;
6659 inst.instruction |= inst.operands[0].reg << 12;
6660 inst.instruction |= inst.operands[1].imm << 16;
6661 inst.instruction |= inst.operands[2].reg;
6664 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6665 preserving the other bits.
6667 setend <endian_specifier>, where <endian_specifier> is either
6673 if (inst.operands[0].imm)
6674 inst.instruction |= 0x200;
6680 unsigned int Rm = (inst.operands[1].present
6681 ? inst.operands[1].reg
6682 : inst.operands[0].reg);
6684 inst.instruction |= inst.operands[0].reg << 12;
6685 inst.instruction |= Rm;
6686 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
6688 inst.instruction |= inst.operands[2].reg << 8;
6689 inst.instruction |= SHIFT_BY_REG;
6692 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6698 inst.reloc.type = BFD_RELOC_ARM_SMC;
6699 inst.reloc.pc_rel = 0;
6705 inst.reloc.type = BFD_RELOC_ARM_SWI;
6706 inst.reloc.pc_rel = 0;
6709 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6710 SMLAxy{cond} Rd,Rm,Rs,Rn
6711 SMLAWy{cond} Rd,Rm,Rs,Rn
6712 Error if any register is R15. */
6717 inst.instruction |= inst.operands[0].reg << 16;
6718 inst.instruction |= inst.operands[1].reg;
6719 inst.instruction |= inst.operands[2].reg << 8;
6720 inst.instruction |= inst.operands[3].reg << 12;
6723 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6724 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6725 Error if any register is R15.
6726 Warning if Rdlo == Rdhi. */
6731 inst.instruction |= inst.operands[0].reg << 12;
6732 inst.instruction |= inst.operands[1].reg << 16;
6733 inst.instruction |= inst.operands[2].reg;
6734 inst.instruction |= inst.operands[3].reg << 8;
6736 if (inst.operands[0].reg == inst.operands[1].reg)
6737 as_tsktsk (_("rdhi and rdlo must be different"));
6740 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6741 SMULxy{cond} Rd,Rm,Rs
6742 Error if any register is R15. */
6747 inst.instruction |= inst.operands[0].reg << 16;
6748 inst.instruction |= inst.operands[1].reg;
6749 inst.instruction |= inst.operands[2].reg << 8;
6752 /* ARM V6 srs (argument parse). */
6757 inst.instruction |= inst.operands[0].imm;
6758 if (inst.operands[0].writeback)
6759 inst.instruction |= WRITE_BACK;
6762 /* ARM V6 strex (argument parse). */
6767 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
6768 || inst.operands[2].postind || inst.operands[2].writeback
6769 || inst.operands[2].immisreg || inst.operands[2].shifted
6770 || inst.operands[2].negative
6771 /* See comment in do_ldrex(). */
6772 || (inst.operands[2].reg == REG_PC),
6775 constraint (inst.operands[0].reg == inst.operands[1].reg
6776 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
6778 constraint (inst.reloc.exp.X_op != O_constant
6779 || inst.reloc.exp.X_add_number != 0,
6780 _("offset must be zero in ARM encoding"));
6782 inst.instruction |= inst.operands[0].reg << 12;
6783 inst.instruction |= inst.operands[1].reg;
6784 inst.instruction |= inst.operands[2].reg << 16;
6785 inst.reloc.type = BFD_RELOC_UNUSED;
6791 constraint (inst.operands[1].reg % 2 != 0,
6792 _("even register required"));
6793 constraint (inst.operands[2].present
6794 && inst.operands[2].reg != inst.operands[1].reg + 1,
6795 _("can only store two consecutive registers"));
6796 /* If op 2 were present and equal to PC, this function wouldn't
6797 have been called in the first place. */
6798 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
6800 constraint (inst.operands[0].reg == inst.operands[1].reg
6801 || inst.operands[0].reg == inst.operands[1].reg + 1
6802 || inst.operands[0].reg == inst.operands[3].reg,
6805 inst.instruction |= inst.operands[0].reg << 12;
6806 inst.instruction |= inst.operands[1].reg;
6807 inst.instruction |= inst.operands[3].reg << 16;
6810 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6811 extends it to 32-bits, and adds the result to a value in another
6812 register. You can specify a rotation by 0, 8, 16, or 24 bits
6813 before extracting the 16-bit value.
6814 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6815 Condition defaults to COND_ALWAYS.
6816 Error if any register uses R15. */
6821 inst.instruction |= inst.operands[0].reg << 12;
6822 inst.instruction |= inst.operands[1].reg << 16;
6823 inst.instruction |= inst.operands[2].reg;
6824 inst.instruction |= inst.operands[3].imm << 10;
6829 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6830 Condition defaults to COND_ALWAYS.
6831 Error if any register uses R15. */
6836 inst.instruction |= inst.operands[0].reg << 12;
6837 inst.instruction |= inst.operands[1].reg;
6838 inst.instruction |= inst.operands[2].imm << 10;
6841 /* VFP instructions. In a logical order: SP variant first, monad
6842 before dyad, arithmetic then move then load/store. */
6845 do_vfp_sp_monadic (void)
6847 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6848 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6852 do_vfp_sp_dyadic (void)
6854 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6855 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6856 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6860 do_vfp_sp_compare_z (void)
6862 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6866 do_vfp_dp_sp_cvt (void)
6868 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6869 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6873 do_vfp_sp_dp_cvt (void)
6875 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6876 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6880 do_vfp_reg_from_sp (void)
6882 inst.instruction |= inst.operands[0].reg << 12;
6883 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6887 do_vfp_reg2_from_sp2 (void)
6889 constraint (inst.operands[2].imm != 2,
6890 _("only two consecutive VFP SP registers allowed here"));
6891 inst.instruction |= inst.operands[0].reg << 12;
6892 inst.instruction |= inst.operands[1].reg << 16;
6893 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6897 do_vfp_sp_from_reg (void)
6899 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
6900 inst.instruction |= inst.operands[1].reg << 12;
6904 do_vfp_sp2_from_reg2 (void)
6906 constraint (inst.operands[0].imm != 2,
6907 _("only two consecutive VFP SP registers allowed here"));
6908 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
6909 inst.instruction |= inst.operands[1].reg << 12;
6910 inst.instruction |= inst.operands[2].reg << 16;
6914 do_vfp_sp_ldst (void)
6916 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6917 encode_arm_cp_address (1, FALSE, TRUE, 0);
6921 do_vfp_dp_ldst (void)
6923 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6924 encode_arm_cp_address (1, FALSE, TRUE, 0);
6929 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
6931 if (inst.operands[0].writeback)
6932 inst.instruction |= WRITE_BACK;
6934 constraint (ldstm_type != VFP_LDSTMIA,
6935 _("this addressing mode requires base-register writeback"));
6936 inst.instruction |= inst.operands[0].reg << 16;
6937 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
6938 inst.instruction |= inst.operands[1].imm;
6942 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
6946 if (inst.operands[0].writeback)
6947 inst.instruction |= WRITE_BACK;
6949 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
6950 _("this addressing mode requires base-register writeback"));
6952 inst.instruction |= inst.operands[0].reg << 16;
6953 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
6955 count = inst.operands[1].imm << 1;
6956 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
6959 inst.instruction |= count;
6963 do_vfp_sp_ldstmia (void)
6965 vfp_sp_ldstm (VFP_LDSTMIA);
6969 do_vfp_sp_ldstmdb (void)
6971 vfp_sp_ldstm (VFP_LDSTMDB);
6975 do_vfp_dp_ldstmia (void)
6977 vfp_dp_ldstm (VFP_LDSTMIA);
6981 do_vfp_dp_ldstmdb (void)
6983 vfp_dp_ldstm (VFP_LDSTMDB);
6987 do_vfp_xp_ldstmia (void)
6989 vfp_dp_ldstm (VFP_LDSTMIAX);
6993 do_vfp_xp_ldstmdb (void)
6995 vfp_dp_ldstm (VFP_LDSTMDBX);
6999 do_vfp_dp_rd_rm (void)
7001 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7002 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7006 do_vfp_dp_rn_rd (void)
7008 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7009 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7013 do_vfp_dp_rd_rn (void)
7015 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7016 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7020 do_vfp_dp_rd_rn_rm (void)
7022 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7023 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7024 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7030 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7034 do_vfp_dp_rm_rd_rn (void)
7036 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7037 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7038 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7041 /* VFPv3 instructions. */
7043 do_vfp_sp_const (void)
7045 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7046 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7047 inst.instruction |= (inst.operands[1].imm >> 4);
7051 do_vfp_dp_const (void)
7053 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7054 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7055 inst.instruction |= (inst.operands[1].imm >> 4);
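/* Worked example (illustrative, assuming the operand parser has already
   reduced the floating-point constant to its 8-bit abcdefgh pattern):
   1.0 has the pattern 0x70, so bits 19:16 receive 0x7 and bits 3:0
   receive 0x0.  */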
7059 vfp_conv (int srcsize)
7061 unsigned immbits = srcsize - inst.operands[1].imm;
7062 inst.instruction |= (immbits & 1) << 5;
7063 inst.instruction |= (immbits >> 1);
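/* Worked example (illustrative): a 32-bit fixed-point operand with 16
   fraction bits gives immbits = 32 - 16 = 16, so bit 5 receives 0 and the
   low field receives 16 >> 1 = 8.  */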
7067 do_vfp_sp_conv_16 (void)
7069 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7074 do_vfp_dp_conv_16 (void)
7076 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7081 do_vfp_sp_conv_32 (void)
7083 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7088 do_vfp_dp_conv_32 (void)
7090 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7095 /* FPA instructions. Also in a logical order. */
7100 inst.instruction |= inst.operands[0].reg << 16;
7101 inst.instruction |= inst.operands[1].reg;
7105 do_fpa_ldmstm (void)
7107 inst.instruction |= inst.operands[0].reg << 12;
7108 switch (inst.operands[1].imm)
7110 case 1: inst.instruction |= CP_T_X; break;
7111 case 2: inst.instruction |= CP_T_Y; break;
7112 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7117 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7119 /* The instruction specified "ea" or "fd", so we can only accept
7120 [Rn]{!}. The instruction does not really support stacking or
7121 unstacking, so we have to emulate these by setting appropriate
7122 bits and offsets. */
7123 constraint (inst.reloc.exp.X_op != O_constant
7124 || inst.reloc.exp.X_add_number != 0,
7125 _("this instruction does not support indexing"));
7127 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7128 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7130 if (!(inst.instruction & INDEX_UP))
7131 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7133 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7135 inst.operands[2].preind = 0;
7136 inst.operands[2].postind = 1;
7140 encode_arm_cp_address (2, TRUE, TRUE, 0);
7143 /* iWMMXt instructions: strictly in alphabetical order. */
7146 do_iwmmxt_tandorc (void)
7148 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7152 do_iwmmxt_textrc (void)
7154 inst.instruction |= inst.operands[0].reg << 12;
7155 inst.instruction |= inst.operands[1].imm;
7159 do_iwmmxt_textrm (void)
7161 inst.instruction |= inst.operands[0].reg << 12;
7162 inst.instruction |= inst.operands[1].reg << 16;
7163 inst.instruction |= inst.operands[2].imm;
7167 do_iwmmxt_tinsr (void)
7169 inst.instruction |= inst.operands[0].reg << 16;
7170 inst.instruction |= inst.operands[1].reg << 12;
7171 inst.instruction |= inst.operands[2].imm;
7175 do_iwmmxt_tmia (void)
7177 inst.instruction |= inst.operands[0].reg << 5;
7178 inst.instruction |= inst.operands[1].reg;
7179 inst.instruction |= inst.operands[2].reg << 12;
7183 do_iwmmxt_waligni (void)
7185 inst.instruction |= inst.operands[0].reg << 12;
7186 inst.instruction |= inst.operands[1].reg << 16;
7187 inst.instruction |= inst.operands[2].reg;
7188 inst.instruction |= inst.operands[3].imm << 20;
7192 do_iwmmxt_wmov (void)
7194 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7195 inst.instruction |= inst.operands[0].reg << 12;
7196 inst.instruction |= inst.operands[1].reg << 16;
7197 inst.instruction |= inst.operands[1].reg;
7201 do_iwmmxt_wldstbh (void)
7204 inst.instruction |= inst.operands[0].reg << 12;
7206 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7208 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7209 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7213 do_iwmmxt_wldstw (void)
7215 /* RIWR_RIWC clears .isreg for a control register. */
7216 if (!inst.operands[0].isreg)
7218 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7219 inst.instruction |= 0xf0000000;
7222 inst.instruction |= inst.operands[0].reg << 12;
7223 encode_arm_cp_address (1, TRUE, TRUE, 0);
7227 do_iwmmxt_wldstd (void)
7229 inst.instruction |= inst.operands[0].reg << 12;
7230 encode_arm_cp_address (1, TRUE, FALSE, 0);
7234 do_iwmmxt_wshufh (void)
7236 inst.instruction |= inst.operands[0].reg << 12;
7237 inst.instruction |= inst.operands[1].reg << 16;
7238 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7239 inst.instruction |= (inst.operands[2].imm & 0x0f);
7243 do_iwmmxt_wzero (void)
7245 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7246 inst.instruction |= inst.operands[0].reg;
7247 inst.instruction |= inst.operands[0].reg << 12;
7248 inst.instruction |= inst.operands[0].reg << 16;
7251 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7252 operations first, then control, shift, and load/store. */
7254 /* Insns like "foo X,Y,Z". */
7257 do_mav_triple (void)
7259 inst.instruction |= inst.operands[0].reg << 16;
7260 inst.instruction |= inst.operands[1].reg;
7261 inst.instruction |= inst.operands[2].reg << 12;
7264 /* Insns like "foo W,X,Y,Z".
7265 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7270 inst.instruction |= inst.operands[0].reg << 5;
7271 inst.instruction |= inst.operands[1].reg << 12;
7272 inst.instruction |= inst.operands[2].reg << 16;
7273 inst.instruction |= inst.operands[3].reg;
7276 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7280 inst.instruction |= inst.operands[1].reg << 12;
7283 /* Maverick shift immediate instructions.
7284 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7285 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7290 int imm = inst.operands[2].imm;
7292 inst.instruction |= inst.operands[0].reg << 12;
7293 inst.instruction |= inst.operands[1].reg << 16;
7295 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7296 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7297 Bit 4 should be 0. */
7298 imm = (imm & 0xf) | ((imm & 0x70) << 1);
7300 inst.instruction |= imm;
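/* Worked example (illustrative): an immediate of 0x25 becomes
   (0x25 & 0xf) | ((0x25 & 0x70) << 1) = 0x5 | 0x40 = 0x45, leaving bit 4
   of the encoding clear.  */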
7303 /* XScale instructions. Also sorted arithmetic before move. */
7305 /* Xscale multiply-accumulate (argument parse)
7308 MIAxycc acc0,Rm,Rs. */
7313 inst.instruction |= inst.operands[1].reg;
7314 inst.instruction |= inst.operands[2].reg << 12;
7317 /* Xscale move-accumulator-register (argument parse)
7319 MARcc acc0,RdLo,RdHi. */
7324 inst.instruction |= inst.operands[1].reg << 12;
7325 inst.instruction |= inst.operands[2].reg << 16;
7328 /* Xscale move-register-accumulator (argument parse)
7330 MRAcc RdLo,RdHi,acc0. */
7335 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
7336 inst.instruction |= inst.operands[0].reg << 12;
7337 inst.instruction |= inst.operands[1].reg << 16;
7340 /* Encoding functions relevant only to Thumb. */
7342 /* inst.operands[i] is a shifted-register operand; encode
7343 it into inst.instruction in the format used by Thumb32. */
7346 encode_thumb32_shifted_operand (int i)
7348 unsigned int value = inst.reloc.exp.X_add_number;
7349 unsigned int shift = inst.operands[i].shift_kind;
7351 constraint (inst.operands[i].immisreg,
7352 _("shift by register not allowed in thumb mode"));
7353 inst.instruction |= inst.operands[i].reg;
7354 if (shift == SHIFT_RRX)
7355 inst.instruction |= SHIFT_ROR << 4;
7358 constraint (inst.reloc.exp.X_op != O_constant,
7359 _("expression too complex"));
7361 constraint (value > 32
7362 || (value == 32 && (shift == SHIFT_LSL
7363 || shift == SHIFT_ROR)),
7364 _("shift expression is too large"));
7368 else if (value == 32)
7371 inst.instruction |= shift << 4;
7372 inst.instruction |= (value & 0x1c) << 10;
7373 inst.instruction |= (value & 0x03) << 6;
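/* Worked example (illustrative): a constant shift of 13 places
   13 >> 2 = 3 in the imm3 field (bits 14:12) and 13 & 3 = 1 in the imm2
   field (bits 7:6), the usual Thumb-2 imm3:imm2 split.  */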
7378 /* inst.operands[i] was set up by parse_address. Encode it into a
7379 Thumb32 format load or store instruction. Reject forms that cannot
7380 be used with such instructions. If is_t is true, reject forms that
7381 cannot be used with a T instruction; if is_d is true, reject forms
7382 that cannot be used with a D instruction. */
7385 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
7387 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7389 constraint (!inst.operands[i].isreg,
7390 _("Instruction does not support =N addresses"));
7392 inst.instruction |= inst.operands[i].reg << 16;
7393 if (inst.operands[i].immisreg)
7395 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
7396 constraint (is_t || is_d, _("cannot use register index with this instruction"));
7397 constraint (inst.operands[i].negative,
7398 _("Thumb does not support negative register indexing"));
7399 constraint (inst.operands[i].postind,
7400 _("Thumb does not support register post-indexing"));
7401 constraint (inst.operands[i].writeback,
7402 _("Thumb does not support register indexing with writeback"));
7403 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
7404 _("Thumb supports only LSL in shifted register indexing"));
7406 inst.instruction |= inst.operands[i].imm;
7407 if (inst.operands[i].shifted)
7409 constraint (inst.reloc.exp.X_op != O_constant,
7410 _("expression too complex"));
7411 constraint (inst.reloc.exp.X_add_number < 0
7412 || inst.reloc.exp.X_add_number > 3,
7413 _("shift out of range"));
7414 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7416 inst.reloc.type = BFD_RELOC_UNUSED;
7418 else if (inst.operands[i].preind)
7420 constraint (is_pc && inst.operands[i].writeback,
7421 _("cannot use writeback with PC-relative addressing"));
7422 constraint (is_t && inst.operands[i].writeback,
7423 _("cannot use writeback with this instruction"));
7427 inst.instruction |= 0x01000000;
7428 if (inst.operands[i].writeback)
7429 inst.instruction |= 0x00200000;
7433 inst.instruction |= 0x00000c00;
7434 if (inst.operands[i].writeback)
7435 inst.instruction |= 0x00000100;
7437 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7439 else if (inst.operands[i].postind)
7441 assert (inst.operands[i].writeback);
7442 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
7443 constraint (is_t, _("cannot use post-indexing with this instruction"));
7446 inst.instruction |= 0x00200000;
7448 inst.instruction |= 0x00000900;
7449 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7451 else /* unindexed - only for coprocessor */
7452 inst.error = _("instruction does not accept unindexed addressing");
7455 /* Table of Thumb instructions which exist in both 16- and 32-bit
7456 encodings (the latter only in post-V6T2 cores). The index is the
7457 value used in the insns table below. When there is more than one
7458 possible 16-bit encoding for the instruction, this table always
7460 Also contains several pseudo-instructions used during relaxation. */
7461 #define T16_32_TAB \
7462 X(adc, 4140, eb400000), \
7463 X(adcs, 4140, eb500000), \
7464 X(add, 1c00, eb000000), \
7465 X(adds, 1c00, eb100000), \
7466 X(addi, 0000, f1000000), \
7467 X(addis, 0000, f1100000), \
7468 X(add_pc,000f, f20f0000), \
7469 X(add_sp,000d, f10d0000), \
7470 X(adr, 000f, f20f0000), \
7471 X(and, 4000, ea000000), \
7472 X(ands, 4000, ea100000), \
7473 X(asr, 1000, fa40f000), \
7474 X(asrs, 1000, fa50f000), \
7475 X(b, e000, f000b000), \
7476 X(bcond, d000, f0008000), \
7477 X(bic, 4380, ea200000), \
7478 X(bics, 4380, ea300000), \
7479 X(cmn, 42c0, eb100f00), \
7480 X(cmp, 2800, ebb00f00), \
7481 X(cpsie, b660, f3af8400), \
7482 X(cpsid, b670, f3af8600), \
7483 X(cpy, 4600, ea4f0000), \
7484 X(dec_sp,80dd, f1bd0d00), \
7485 X(eor, 4040, ea800000), \
7486 X(eors, 4040, ea900000), \
7487 X(inc_sp,00dd, f10d0d00), \
7488 X(ldmia, c800, e8900000), \
7489 X(ldr, 6800, f8500000), \
7490 X(ldrb, 7800, f8100000), \
7491 X(ldrh, 8800, f8300000), \
7492 X(ldrsb, 5600, f9100000), \
7493 X(ldrsh, 5e00, f9300000), \
7494 X(ldr_pc,4800, f85f0000), \
7495 X(ldr_pc2,4800, f85f0000), \
7496 X(ldr_sp,9800, f85d0000), \
7497 X(lsl, 0000, fa00f000), \
7498 X(lsls, 0000, fa10f000), \
7499 X(lsr, 0800, fa20f000), \
7500 X(lsrs, 0800, fa30f000), \
7501 X(mov, 2000, ea4f0000), \
7502 X(movs, 2000, ea5f0000), \
7503 X(mul, 4340, fb00f000), \
7504 X(muls, 4340, ffffffff), /* no 32b muls */ \
7505 X(mvn, 43c0, ea6f0000), \
7506 X(mvns, 43c0, ea7f0000), \
7507 X(neg, 4240, f1c00000), /* rsb #0 */ \
7508 X(negs, 4240, f1d00000), /* rsbs #0 */ \
7509 X(orr, 4300, ea400000), \
7510 X(orrs, 4300, ea500000), \
7511 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
7512 X(push, b400, e92d0000), /* stmdb sp!,... */ \
7513 X(rev, ba00, fa90f080), \
7514 X(rev16, ba40, fa90f090), \
7515 X(revsh, bac0, fa90f0b0), \
7516 X(ror, 41c0, fa60f000), \
7517 X(rors, 41c0, fa70f000), \
7518 X(sbc, 4180, eb600000), \
7519 X(sbcs, 4180, eb700000), \
7520 X(stmia, c000, e8800000), \
7521 X(str, 6000, f8400000), \
7522 X(strb, 7000, f8000000), \
7523 X(strh, 8000, f8200000), \
7524 X(str_sp,9000, f84d0000), \
7525 X(sub, 1e00, eba00000), \
7526 X(subs, 1e00, ebb00000), \
7527 X(subi, 8000, f1a00000), \
7528 X(subis, 8000, f1b00000), \
7529 X(sxtb, b240, fa4ff080), \
7530 X(sxth, b200, fa0ff080), \
7531 X(tst, 4200, ea100f00), \
7532 X(uxtb, b2c0, fa5ff080), \
7533 X(uxth, b280, fa1ff080), \
7534 X(nop, bf00, f3af8000), \
7535 X(yield, bf10, f3af8001), \
7536 X(wfe, bf20, f3af8002), \
7537 X(wfi, bf30, f3af8003), \
7538 X(sev, bf40, f3af8004),
7540 /* To catch errors in encoding functions, the codes are all offset by
7541 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
7542 as 16-bit instructions. */
7543 #define X(a,b,c) T_MNEM_##a
7544 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
7547 #define X(a,b,c) 0x##b
7548 static const unsigned short thumb_op16[] = { T16_32_TAB };
7549 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
7552 #define X(a,b,c) 0x##c
7553 static const unsigned int thumb_op32[] = { T16_32_TAB };
7554 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
7555 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
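/* Illustrative sketch (not part of the original source, never compiled):
   how the three expansions of T16_32_TAB above fit together.  Looking up
   T_MNEM_add yields the 16-bit opcode 0x1c00 and the 32-bit opcode
   0xeb000000, and THUMB_SETS_FLAGS tests bit 20 of the 32-bit form, so it
   is set for T_MNEM_adds but not for T_MNEM_add.  The function name below
   is hypothetical.  */
#if 0
static void
t16_32_tab_example (void)
{
  assert (THUMB_OP16 (T_MNEM_add) == 0x1c00);
  assert (THUMB_OP32 (T_MNEM_add) == 0xeb000000);
  assert (THUMB_SETS_FLAGS (T_MNEM_adds) != 0);
  assert (THUMB_SETS_FLAGS (T_MNEM_add) == 0);
}
#endif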
7559 /* Thumb instruction encoders, in alphabetical order. */
7563 do_t_add_sub_w (void)
7567 Rd = inst.operands[0].reg;
7568 Rn = inst.operands[1].reg;
7570 constraint (Rd == 15, _("PC not allowed as destination"));
7571 inst.instruction |= (Rn << 16) | (Rd << 8);
7572 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
7575 /* Parse an add or subtract instruction. We get here with inst.instruction
7576 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7583 Rd = inst.operands[0].reg;
7584 Rs = (inst.operands[1].present
7585 ? inst.operands[1].reg /* Rd, Rs, foo */
7586 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7594 flags = (inst.instruction == T_MNEM_adds
7595 || inst.instruction == T_MNEM_subs);
7597 narrow = (current_it_mask == 0);
7599 narrow = (current_it_mask != 0);
7600 if (!inst.operands[2].isreg)
7603 if (inst.size_req != 4)
7607 add = (inst.instruction == T_MNEM_add
7608 || inst.instruction == T_MNEM_adds);
7609 /* Attempt to use a narrow opcode, with relaxation if
7611 if (Rd == REG_SP && Rs == REG_SP && !flags)
7612 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
7613 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
7614 opcode = T_MNEM_add_sp;
7615 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
7616 opcode = T_MNEM_add_pc;
7617 else if (Rd <= 7 && Rs <= 7 && narrow)
7620 opcode = add ? T_MNEM_addis : T_MNEM_subis;
7622 opcode = add ? T_MNEM_addi : T_MNEM_subi;
7626 inst.instruction = THUMB_OP16(opcode);
7627 inst.instruction |= (Rd << 4) | Rs;
7628 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7629 if (inst.size_req != 2)
7630 inst.relax = opcode;
7633 constraint (inst.size_req == 2, BAD_HIREG);
7635 if (inst.size_req == 4
7636 || (inst.size_req != 2 && !opcode))
7638 /* ??? Convert large immediates to addw/subw. */
7639 inst.instruction = THUMB_OP32 (inst.instruction);
7640 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7641 inst.instruction |= inst.operands[0].reg << 8;
7642 inst.instruction |= inst.operands[1].reg << 16;
7643 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7648 Rn = inst.operands[2].reg;
7649 /* See if we can do this with a 16-bit instruction. */
7650 if (!inst.operands[2].shifted && inst.size_req != 4)
7652 if (Rd > 7 || Rs > 7 || Rn > 7)
7657 inst.instruction = ((inst.instruction == T_MNEM_adds
7658 || inst.instruction == T_MNEM_add)
7661 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7665 if (inst.instruction == T_MNEM_add)
7669 inst.instruction = T_OPCODE_ADD_HI;
7670 inst.instruction |= (Rd & 8) << 4;
7671 inst.instruction |= (Rd & 7);
7672 inst.instruction |= Rn << 3;
7675 /* ... because addition is commutative! */
7678 inst.instruction = T_OPCODE_ADD_HI;
7679 inst.instruction |= (Rd & 8) << 4;
7680 inst.instruction |= (Rd & 7);
7681 inst.instruction |= Rs << 3;
7686 /* If we get here, it can't be done in 16 bits. */
7687 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
7688 _("shift must be constant"));
7689 inst.instruction = THUMB_OP32 (inst.instruction);
7690 inst.instruction |= Rd << 8;
7691 inst.instruction |= Rs << 16;
7692 encode_thumb32_shifted_operand (2);
7697 constraint (inst.instruction == T_MNEM_adds
7698 || inst.instruction == T_MNEM_subs,
7701 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
7703 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
7704 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
7707 inst.instruction = (inst.instruction == T_MNEM_add
7709 inst.instruction |= (Rd << 4) | Rs;
7710 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7714 Rn = inst.operands[2].reg;
7715 constraint (inst.operands[2].shifted, _("unshifted register required"));
7717 /* We now have Rd, Rs, and Rn set to registers. */
7718 if (Rd > 7 || Rs > 7 || Rn > 7)
7720 /* Can't do this for SUB. */
7721 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
7722 inst.instruction = T_OPCODE_ADD_HI;
7723 inst.instruction |= (Rd & 8) << 4;
7724 inst.instruction |= (Rd & 7);
7726 inst.instruction |= Rn << 3;
7728 inst.instruction |= Rs << 3;
7730 constraint (1, _("dest must overlap one source register"));
7734 inst.instruction = (inst.instruction == T_MNEM_add
7735 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
7736 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7744 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
7746 /* Defer to section relaxation. */
7747 inst.relax = inst.instruction;
7748 inst.instruction = THUMB_OP16 (inst.instruction);
7749 inst.instruction |= inst.operands[0].reg << 4;
7751 else if (unified_syntax && inst.size_req != 2)
7753 /* Generate a 32-bit opcode. */
7754 inst.instruction = THUMB_OP32 (inst.instruction);
7755 inst.instruction |= inst.operands[0].reg << 8;
7756 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
7757 inst.reloc.pc_rel = 1;
7761 /* Generate a 16-bit opcode. */
7762 inst.instruction = THUMB_OP16 (inst.instruction);
7763 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7764 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
7765 inst.reloc.pc_rel = 1;
7767 inst.instruction |= inst.operands[0].reg << 4;
7771 /* Arithmetic instructions for which there is just one 16-bit
7772 instruction encoding, and it allows only two low registers.
7773 For maximal compatibility with ARM syntax, we allow three register
7774 operands even when Thumb-32 instructions are not available, as long
7775 as the first two are identical. For instance, both "sbc r0,r1" and
7776 "sbc r0,r0,r1" are allowed. */
7782 Rd = inst.operands[0].reg;
7783 Rs = (inst.operands[1].present
7784 ? inst.operands[1].reg /* Rd, Rs, foo */
7785 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7786 Rn = inst.operands[2].reg;
7790 if (!inst.operands[2].isreg)
7792 /* For an immediate, we always generate a 32-bit opcode;
7793 section relaxation will shrink it later if possible. */
7794 inst.instruction = THUMB_OP32 (inst.instruction);
7795 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7796 inst.instruction |= Rd << 8;
7797 inst.instruction |= Rs << 16;
7798 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7804 /* See if we can do this with a 16-bit instruction. */
7805 if (THUMB_SETS_FLAGS (inst.instruction))
7806 narrow = current_it_mask == 0;
7808 narrow = current_it_mask != 0;
7810 if (Rd > 7 || Rn > 7 || Rs > 7)
7812 if (inst.operands[2].shifted)
7814 if (inst.size_req == 4)
7820 inst.instruction = THUMB_OP16 (inst.instruction);
7821 inst.instruction |= Rd;
7822 inst.instruction |= Rn << 3;
7826 /* If we get here, it can't be done in 16 bits. */
7827 constraint (inst.operands[2].shifted
7828 && inst.operands[2].immisreg,
7829 _("shift must be constant"));
7830 inst.instruction = THUMB_OP32 (inst.instruction);
7831 inst.instruction |= Rd << 8;
7832 inst.instruction |= Rs << 16;
7833 encode_thumb32_shifted_operand (2);
7838 /* On its face this is a lie - the instruction does set the
7839 flags. However, the only supported mnemonic in this mode
7841 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7843 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7844 _("unshifted register required"));
7845 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7846 constraint (Rd != Rs,
7847 _("dest and source1 must be the same register"));
7849 inst.instruction = THUMB_OP16 (inst.instruction);
7850 inst.instruction |= Rd;
7851 inst.instruction |= Rn << 3;
7855 /* Similarly, but for instructions where the arithmetic operation is
7856 commutative, so we can allow either of them to be different from
7857 the destination operand in a 16-bit instruction. For instance, all
7858 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7865 Rd = inst.operands[0].reg;
7866 Rs = (inst.operands[1].present
7867 ? inst.operands[1].reg /* Rd, Rs, foo */
7868 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7869 Rn = inst.operands[2].reg;
7873 if (!inst.operands[2].isreg)
7875 /* For an immediate, we always generate a 32-bit opcode;
7876 section relaxation will shrink it later if possible. */
7877 inst.instruction = THUMB_OP32 (inst.instruction);
7878 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7879 inst.instruction |= Rd << 8;
7880 inst.instruction |= Rs << 16;
7881 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7887 /* See if we can do this with a 16-bit instruction. */
7888 if (THUMB_SETS_FLAGS (inst.instruction))
7889 narrow = current_it_mask == 0;
7891 narrow = current_it_mask != 0;
7893 if (Rd > 7 || Rn > 7 || Rs > 7)
7895 if (inst.operands[2].shifted)
7897 if (inst.size_req == 4)
7904 inst.instruction = THUMB_OP16 (inst.instruction);
7905 inst.instruction |= Rd;
7906 inst.instruction |= Rn << 3;
7911 inst.instruction = THUMB_OP16 (inst.instruction);
7912 inst.instruction |= Rd;
7913 inst.instruction |= Rs << 3;
7918 /* If we get here, it can't be done in 16 bits. */
7919 constraint (inst.operands[2].shifted
7920 && inst.operands[2].immisreg,
7921 _("shift must be constant"));
7922 inst.instruction = THUMB_OP32 (inst.instruction);
7923 inst.instruction |= Rd << 8;
7924 inst.instruction |= Rs << 16;
7925 encode_thumb32_shifted_operand (2);
7930 /* On its face this is a lie - the instruction does set the
7931 flags. However, the only supported mnemonic in this mode
7933 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7935 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7936 _("unshifted register required"));
7937 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7939 inst.instruction = THUMB_OP16 (inst.instruction);
7940 inst.instruction |= Rd;
7943 inst.instruction |= Rn << 3;
7945 inst.instruction |= Rs << 3;
7947 constraint (1, _("dest must overlap one source register"));
7954 if (inst.operands[0].present)
7956 constraint ((inst.instruction & 0xf0) != 0x40
7957 && inst.operands[0].imm != 0xf,
7958 "bad barrier type");
7959 inst.instruction |= inst.operands[0].imm;
7962 inst.instruction |= 0xf;
7968 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7969 constraint (msb > 32, _("bit-field extends past end of register"));
7970 /* The instruction encoding stores the LSB and MSB,
7971 not the LSB and width. */
7972 inst.instruction |= inst.operands[0].reg << 8;
7973 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
7974 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
7975 inst.instruction |= msb - 1;
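/* Worked example (illustrative, not from the original source): for
   "bfc r3, #8, #4" the parser gives lsb = 8 and width = 4, so msb = 12.
   The ORs above then place Rd = 3 in bits 8-11, split lsb into the
   imm3:imm2 fields ((8 & 0x1c) << 10 and (8 & 0x03) << 6) and store
   msb - 1 = 11 in the low five bits.  */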
7983 /* #0 in second position is alternative syntax for bfc, which is
7984 the same instruction but with REG_PC in the Rm field. */
7985 if (!inst.operands[1].isreg)
7986 inst.operands[1].reg = REG_PC;
7988 msb = inst.operands[2].imm + inst.operands[3].imm;
7989 constraint (msb > 32, _("bit-field extends past end of register"));
7990 /* The instruction encoding stores the LSB and MSB,
7991 not the LSB and width. */
7992 inst.instruction |= inst.operands[0].reg << 8;
7993 inst.instruction |= inst.operands[1].reg << 16;
7994 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
7995 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
7996 inst.instruction |= msb - 1;
8002 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8003 _("bit-field extends past end of register"));
8004 inst.instruction |= inst.operands[0].reg << 8;
8005 inst.instruction |= inst.operands[1].reg << 16;
8006 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8007 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8008 inst.instruction |= inst.operands[3].imm - 1;
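/* Similarly (illustrative assumption): "ubfx r0, r1, #4, #8" places Rd = 0
   in bits 8-11, Rn = 1 in bits 16-19, splits lsb = 4 into the imm3:imm2
   fields as above, and stores width - 1 = 7 in the low five bits.  */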
8011 /* ARM V5 Thumb BLX (argument parse)
8012 BLX <target_addr> which is BLX(1)
8013 BLX <Rm> which is BLX(2)
8014 Unfortunately, there are two different opcodes for this mnemonic.
8015 So, the insns[].value is not used, and the code here zaps values
8016 into inst.instruction.
8018 ??? How to take advantage of the additional two bits of displacement
8019 available in Thumb32 mode? Need new relocation? */
8024 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8025 if (inst.operands[0].isreg)
8026 /* We have a register, so this is BLX(2). */
8027 inst.instruction |= inst.operands[0].reg << 3;
8030 /* No register. This must be BLX(1). */
8031 inst.instruction = 0xf000e800;
8033 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8034 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8037 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8038 inst.reloc.pc_rel = 1;
8048 if (current_it_mask)
8050 /* Conditional branches inside IT blocks are encoded as unconditional
8053 /* A branch must be the last instruction in an IT block. */
8054 constraint (current_it_mask != 0x10, BAD_BRANCH);
8059 if (cond != COND_ALWAYS)
8060 opcode = T_MNEM_bcond;
8062 opcode = inst.instruction;
8064 if (unified_syntax && inst.size_req == 4)
8066 inst.instruction = THUMB_OP32 (opcode);
8067 if (cond == COND_ALWAYS)
8068 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8071 assert (cond != 0xF);
8072 inst.instruction |= cond << 22;
8073 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8078 inst.instruction = THUMB_OP16 (opcode);
8079 if (cond == COND_ALWAYS)
8080 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8083 inst.instruction |= cond << 8;
8084 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8086 /* Allow section relaxation. */
8087 if (unified_syntax && inst.size_req != 2)
8088 inst.relax = opcode;
8091 inst.reloc.pc_rel = 1;
8097 constraint (inst.cond != COND_ALWAYS,
8098 _("instruction is always unconditional"));
8099 if (inst.operands[0].present)
8101 constraint (inst.operands[0].imm > 255,
8102 _("immediate value out of range"));
8103 inst.instruction |= inst.operands[0].imm;
8108 do_t_branch23 (void)
8110 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8111 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8112 inst.reloc.pc_rel = 1;
8114 /* If the destination of the branch is a defined symbol which does not have
8115 the THUMB_FUNC attribute, then we must be calling a function which has
8116 the (interfacearm) attribute. We look for the Thumb entry point to that
8117 function and change the branch to refer to that function instead. */
8118 if ( inst.reloc.exp.X_op == O_symbol
8119 && inst.reloc.exp.X_add_symbol != NULL
8120 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8121 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8122 inst.reloc.exp.X_add_symbol =
8123 find_real_start (inst.reloc.exp.X_add_symbol);
8129 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8130 inst.instruction |= inst.operands[0].reg << 3;
8131 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8132 should cause the alignment to be checked once it is known. This is
8133 because BX PC only works if the instruction is word aligned. */
8139 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8140 if (inst.operands[0].reg == REG_PC)
8141 as_tsktsk (_("use of r15 in bxj is not really useful"));
8143 inst.instruction |= inst.operands[0].reg << 16;
8149 inst.instruction |= inst.operands[0].reg << 8;
8150 inst.instruction |= inst.operands[1].reg << 16;
8151 inst.instruction |= inst.operands[1].reg;
8157 constraint (current_it_mask, BAD_NOT_IT);
8158 inst.instruction |= inst.operands[0].imm;
8164 constraint (current_it_mask, BAD_NOT_IT);
8166 && (inst.operands[1].present || inst.size_req == 4)
8167 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8169 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8170 inst.instruction = 0xf3af8000;
8171 inst.instruction |= imod << 9;
8172 inst.instruction |= inst.operands[0].imm << 5;
8173 if (inst.operands[1].present)
8174 inst.instruction |= 0x100 | inst.operands[1].imm;
8178 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8179 && (inst.operands[0].imm & 4),
8180 _("selected processor does not support 'A' form "
8181 "of this instruction"));
8182 constraint (inst.operands[1].present || inst.size_req == 4,
8183 _("Thumb does not support the 2-argument "
8184 "form of this instruction"));
8185 inst.instruction |= inst.operands[0].imm;
8189 /* THUMB CPY instruction (argument parse). */
8194 if (inst.size_req == 4)
8196 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8197 inst.instruction |= inst.operands[0].reg << 8;
8198 inst.instruction |= inst.operands[1].reg;
8202 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8203 inst.instruction |= (inst.operands[0].reg & 0x7);
8204 inst.instruction |= inst.operands[1].reg << 3;
8211 constraint (current_it_mask, BAD_NOT_IT);
8212 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8213 inst.instruction |= inst.operands[0].reg;
8214 inst.reloc.pc_rel = 1;
8215 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8221 inst.instruction |= inst.operands[0].imm;
8227 if (!inst.operands[1].present)
8228 inst.operands[1].reg = inst.operands[0].reg;
8229 inst.instruction |= inst.operands[0].reg << 8;
8230 inst.instruction |= inst.operands[1].reg << 16;
8231 inst.instruction |= inst.operands[2].reg;
8237 if (unified_syntax && inst.size_req == 4)
8238 inst.instruction = THUMB_OP32 (inst.instruction);
8240 inst.instruction = THUMB_OP16 (inst.instruction);
8246 unsigned int cond = inst.operands[0].imm;
8248 constraint (current_it_mask, BAD_NOT_IT);
8249 current_it_mask = (inst.instruction & 0xf) | 0x10;
8252 /* If the condition is a negative condition, invert the mask. */
8253 if ((cond & 0x1) == 0x0)
8255 unsigned int mask = inst.instruction & 0x000f;
8257 if ((mask & 0x7) == 0)
8258 /* no conversion needed */;
8259 else if ((mask & 0x3) == 0)
8261 else if ((mask & 0x1) == 0)
8266 inst.instruction &= 0xfff0;
8267 inst.instruction |= mask;
8270 inst.instruction |= cond << 4;
8276 /* This really doesn't seem worth it. */
8277 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8278 _("expression too complex"));
8279 constraint (inst.operands[1].writeback,
8280 _("Thumb load/store multiple does not support {reglist}^"));
8284 /* See if we can use a 16-bit instruction. */
8285 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
8286 && inst.size_req != 4
8287 && inst.operands[0].reg <= 7
8288 && !(inst.operands[1].imm & ~0xff)
8289 && (inst.instruction == T_MNEM_stmia
8290 ? inst.operands[0].writeback
8291 : (inst.operands[0].writeback
8292 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
8294 if (inst.instruction == T_MNEM_stmia
8295 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
8296 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8297 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8298 inst.operands[0].reg);
8300 inst.instruction = THUMB_OP16 (inst.instruction);
8301 inst.instruction |= inst.operands[0].reg << 8;
8302 inst.instruction |= inst.operands[1].imm;
8306 if (inst.operands[1].imm & (1 << 13))
8307 as_warn (_("SP should not be in register list"));
8308 if (inst.instruction == T_MNEM_stmia)
8310 if (inst.operands[1].imm & (1 << 15))
8311 as_warn (_("PC should not be in register list"));
8312 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
8313 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8314 inst.operands[0].reg);
8318 if (inst.operands[1].imm & (1 << 14)
8319 && inst.operands[1].imm & (1 << 15))
8320 as_warn (_("LR and PC should not both be in register list"));
8321 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8322 && inst.operands[0].writeback)
8323 as_warn (_("base register should not be in register list "
8324 "when written back"));
8326 if (inst.instruction < 0xffff)
8327 inst.instruction = THUMB_OP32 (inst.instruction);
8328 inst.instruction |= inst.operands[0].reg << 16;
8329 inst.instruction |= inst.operands[1].imm;
8330 if (inst.operands[0].writeback)
8331 inst.instruction |= WRITE_BACK;
8336 constraint (inst.operands[0].reg > 7
8337 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
8338 if (inst.instruction == T_MNEM_stmia)
8340 if (!inst.operands[0].writeback)
8341 as_warn (_("this instruction will write back the base register"));
8342 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8343 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8344 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8345 inst.operands[0].reg);
8349 if (!inst.operands[0].writeback
8350 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
8351 as_warn (_("this instruction will write back the base register"));
8352 else if (inst.operands[0].writeback
8353 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
8354 as_warn (_("this instruction will not write back the base register"));
8357 inst.instruction = THUMB_OP16 (inst.instruction);
8358 inst.instruction |= inst.operands[0].reg << 8;
8359 inst.instruction |= inst.operands[1].imm;
8366 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8367 || inst.operands[1].postind || inst.operands[1].writeback
8368 || inst.operands[1].immisreg || inst.operands[1].shifted
8369 || inst.operands[1].negative,
8372 inst.instruction |= inst.operands[0].reg << 12;
8373 inst.instruction |= inst.operands[1].reg << 16;
8374 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
8380 if (!inst.operands[1].present)
8382 constraint (inst.operands[0].reg == REG_LR,
8383 _("r14 not allowed as first register "
8384 "when second register is omitted"));
8385 inst.operands[1].reg = inst.operands[0].reg + 1;
8387 constraint (inst.operands[0].reg == inst.operands[1].reg,
8390 inst.instruction |= inst.operands[0].reg << 12;
8391 inst.instruction |= inst.operands[1].reg << 8;
8392 inst.instruction |= inst.operands[2].reg << 16;
8398 unsigned long opcode;
8401 opcode = inst.instruction;
8404 if (!inst.operands[1].isreg)
8406 if (opcode <= 0xffff)
8407 inst.instruction = THUMB_OP32 (opcode);
8408 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8411 if (inst.operands[1].isreg
8412 && !inst.operands[1].writeback
8413 && !inst.operands[1].shifted && !inst.operands[1].postind
8414 && !inst.operands[1].negative && inst.operands[0].reg <= 7
8416 && inst.size_req != 4)
8418 /* Insn may have a 16-bit form. */
8419 Rn = inst.operands[1].reg;
8420 if (inst.operands[1].immisreg)
8422 inst.instruction = THUMB_OP16 (opcode);
8424 if (Rn <= 7 && inst.operands[1].imm <= 7)
8427 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
8428 && opcode != T_MNEM_ldrsb)
8429 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
8430 || (Rn == REG_SP && opcode == T_MNEM_str))
8437 if (inst.reloc.pc_rel)
8438 opcode = T_MNEM_ldr_pc2;
8440 opcode = T_MNEM_ldr_pc;
8444 if (opcode == T_MNEM_ldr)
8445 opcode = T_MNEM_ldr_sp;
8447 opcode = T_MNEM_str_sp;
8449 inst.instruction = inst.operands[0].reg << 8;
8453 inst.instruction = inst.operands[0].reg;
8454 inst.instruction |= inst.operands[1].reg << 3;
8456 inst.instruction |= THUMB_OP16 (opcode);
8457 if (inst.size_req == 2)
8458 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8460 inst.relax = opcode;
8464 /* Definitely a 32-bit variant. */
8465 inst.instruction = THUMB_OP32 (opcode);
8466 inst.instruction |= inst.operands[0].reg << 12;
8467 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
8471 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8473 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
8475 /* Only [Rn,Rm] is acceptable. */
8476 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
8477 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
8478 || inst.operands[1].postind || inst.operands[1].shifted
8479 || inst.operands[1].negative,
8480 _("Thumb does not support this addressing mode"));
8481 inst.instruction = THUMB_OP16 (inst.instruction);
8485 inst.instruction = THUMB_OP16 (inst.instruction);
8486 if (!inst.operands[1].isreg)
8487 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8490 constraint (!inst.operands[1].preind
8491 || inst.operands[1].shifted
8492 || inst.operands[1].writeback,
8493 _("Thumb does not support this addressing mode"));
8494 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
8496 constraint (inst.instruction & 0x0600,
8497 _("byte or halfword not valid for base register"));
8498 constraint (inst.operands[1].reg == REG_PC
8499 && !(inst.instruction & THUMB_LOAD_BIT),
8500 _("r15 based store not allowed"));
8501 constraint (inst.operands[1].immisreg,
8502 _("invalid base register for register offset"));
8504 if (inst.operands[1].reg == REG_PC)
8505 inst.instruction = T_OPCODE_LDR_PC;
8506 else if (inst.instruction & THUMB_LOAD_BIT)
8507 inst.instruction = T_OPCODE_LDR_SP;
8509 inst.instruction = T_OPCODE_STR_SP;
8511 inst.instruction |= inst.operands[0].reg << 8;
8512 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8516 constraint (inst.operands[1].reg > 7, BAD_HIREG);
8517 if (!inst.operands[1].immisreg)
8519 /* Immediate offset. */
8520 inst.instruction |= inst.operands[0].reg;
8521 inst.instruction |= inst.operands[1].reg << 3;
8522 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8526 /* Register offset. */
8527 constraint (inst.operands[1].imm > 7, BAD_HIREG);
8528 constraint (inst.operands[1].negative,
8529 _("Thumb does not support this addressing mode"));
8532 switch (inst.instruction)
8534 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
8535 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
8536 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
8537 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
8538 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
8539 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
8540 case 0x5600 /* ldrsb */:
8541 case 0x5e00 /* ldrsh */: break;
8545 inst.instruction |= inst.operands[0].reg;
8546 inst.instruction |= inst.operands[1].reg << 3;
8547 inst.instruction |= inst.operands[1].imm << 6;
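/* Illustrative layout (assumption based on the ORs above): in this 16-bit
   register-offset form, e.g. "ldr r0, [r1, r2]", Rd occupies bits 0-2,
   the base register bits 3-5 and the index register (held in
   operands[1].imm) bits 6-8.  */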
8553 if (!inst.operands[1].present)
8555 inst.operands[1].reg = inst.operands[0].reg + 1;
8556 constraint (inst.operands[0].reg == REG_LR,
8557 _("r14 not allowed here"));
8559 inst.instruction |= inst.operands[0].reg << 12;
8560 inst.instruction |= inst.operands[1].reg << 8;
8561 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
8568 inst.instruction |= inst.operands[0].reg << 12;
8569 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
8575 inst.instruction |= inst.operands[0].reg << 8;
8576 inst.instruction |= inst.operands[1].reg << 16;
8577 inst.instruction |= inst.operands[2].reg;
8578 inst.instruction |= inst.operands[3].reg << 12;
8584 inst.instruction |= inst.operands[0].reg << 12;
8585 inst.instruction |= inst.operands[1].reg << 8;
8586 inst.instruction |= inst.operands[2].reg << 16;
8587 inst.instruction |= inst.operands[3].reg;
8595 int r0off = (inst.instruction == T_MNEM_mov
8596 || inst.instruction == T_MNEM_movs) ? 8 : 16;
8597 unsigned long opcode;
8599 bfd_boolean low_regs;
8601 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
8602 opcode = inst.instruction;
8603 if (current_it_mask)
8604 narrow = opcode != T_MNEM_movs;
8606 narrow = opcode != T_MNEM_movs || low_regs;
8607 if (inst.size_req == 4
8608 || inst.operands[1].shifted)
8611 if (!inst.operands[1].isreg)
8613 /* Immediate operand. */
8614 if (current_it_mask == 0 && opcode == T_MNEM_mov)
8616 if (low_regs && narrow)
8618 inst.instruction = THUMB_OP16 (opcode);
8619 inst.instruction |= inst.operands[0].reg << 8;
8620 if (inst.size_req == 2)
8621 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8623 inst.relax = opcode;
8627 inst.instruction = THUMB_OP32 (inst.instruction);
8628 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8629 inst.instruction |= inst.operands[0].reg << r0off;
8630 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8635 inst.instruction = THUMB_OP32 (inst.instruction);
8636 inst.instruction |= inst.operands[0].reg << r0off;
8637 encode_thumb32_shifted_operand (1);
8640 switch (inst.instruction)
8643 inst.instruction = T_OPCODE_MOV_HR;
8644 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8645 inst.instruction |= (inst.operands[0].reg & 0x7);
8646 inst.instruction |= inst.operands[1].reg << 3;
8650 /* We know we have low registers at this point.
8651 Generate ADD Rd, Rs, #0. */
8652 inst.instruction = T_OPCODE_ADD_I3;
8653 inst.instruction |= inst.operands[0].reg;
8654 inst.instruction |= inst.operands[1].reg << 3;
8660 inst.instruction = T_OPCODE_CMP_LR;
8661 inst.instruction |= inst.operands[0].reg;
8662 inst.instruction |= inst.operands[1].reg << 3;
8666 inst.instruction = T_OPCODE_CMP_HR;
8667 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8668 inst.instruction |= (inst.operands[0].reg & 0x7);
8669 inst.instruction |= inst.operands[1].reg << 3;
8676 inst.instruction = THUMB_OP16 (inst.instruction);
8677 if (inst.operands[1].isreg)
8679 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
8681 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8682 since a MOV instruction produces unpredictable results. */
8683 if (inst.instruction == T_OPCODE_MOV_I8)
8684 inst.instruction = T_OPCODE_ADD_I3;
8686 inst.instruction = T_OPCODE_CMP_LR;
8688 inst.instruction |= inst.operands[0].reg;
8689 inst.instruction |= inst.operands[1].reg << 3;
8693 if (inst.instruction == T_OPCODE_MOV_I8)
8694 inst.instruction = T_OPCODE_MOV_HR;
8696 inst.instruction = T_OPCODE_CMP_HR;
8702 constraint (inst.operands[0].reg > 7,
8703 _("only lo regs allowed with immediate"));
8704 inst.instruction |= inst.operands[0].reg << 8;
8705 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8712 inst.instruction |= inst.operands[0].reg << 8;
8713 inst.instruction |= (inst.operands[1].imm & 0xf000) << 4;
8714 inst.instruction |= (inst.operands[1].imm & 0x0800) << 15;
8715 inst.instruction |= (inst.operands[1].imm & 0x0700) << 4;
8716 inst.instruction |= (inst.operands[1].imm & 0x00ff);
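/* Worked example (illustrative, not from the original source): for
   "movw r0, #0x1234" the ORs above scatter the 16-bit immediate as
   imm4:i:imm3:imm8, i.e. the bits masked by 0xf000 land in bits 16-19,
   the 0x0800 bit in bit 26 (zero here), the 0x0700 bits in bits 12-14
   and the low byte in bits 0-7.  */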
8724 int r0off = (inst.instruction == T_MNEM_mvn
8725 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
8728 if (inst.size_req == 4
8729 || inst.instruction > 0xffff
8730 || inst.operands[1].shifted
8731 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8733 else if (inst.instruction == T_MNEM_cmn)
8735 else if (THUMB_SETS_FLAGS (inst.instruction))
8736 narrow = (current_it_mask == 0);
8738 narrow = (current_it_mask != 0);
8740 if (!inst.operands[1].isreg)
8742 /* For an immediate, we always generate a 32-bit opcode;
8743 section relaxation will shrink it later if possible. */
8744 if (inst.instruction < 0xffff)
8745 inst.instruction = THUMB_OP32 (inst.instruction);
8746 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8747 inst.instruction |= inst.operands[0].reg << r0off;
8748 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8752 /* See if we can do this with a 16-bit instruction. */
8755 inst.instruction = THUMB_OP16 (inst.instruction);
8756 inst.instruction |= inst.operands[0].reg;
8757 inst.instruction |= inst.operands[1].reg << 3;
8761 constraint (inst.operands[1].shifted
8762 && inst.operands[1].immisreg,
8763 _("shift must be constant"));
8764 if (inst.instruction < 0xffff)
8765 inst.instruction = THUMB_OP32 (inst.instruction);
8766 inst.instruction |= inst.operands[0].reg << r0off;
8767 encode_thumb32_shifted_operand (1);
8773 constraint (inst.instruction > 0xffff
8774 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
8775 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
8776 _("unshifted register required"));
8777 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8780 inst.instruction = THUMB_OP16 (inst.instruction);
8781 inst.instruction |= inst.operands[0].reg;
8782 inst.instruction |= inst.operands[1].reg << 3;
8790 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
8793 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8794 _("selected processor does not support "
8795 "requested special purpose register"));
8799 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8800 _("selected processor does not support "
8801 "requested special purpose register %x"));
8802 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8803 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
8804 _("'CPSR' or 'SPSR' expected"));
8807 inst.instruction |= inst.operands[0].reg << 8;
8808 inst.instruction |= (flags & SPSR_BIT) >> 2;
8809 inst.instruction |= inst.operands[1].imm & 0xff;
8817 constraint (!inst.operands[1].isreg,
8818 _("Thumb encoding does not support an immediate here"));
8819 flags = inst.operands[0].imm;
8822 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8823 _("selected processor does not support "
8824 "requested special purpose register"));
8828 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8829 _("selected processor does not support "
8830 "requested special purpose register"));
8833 inst.instruction |= (flags & SPSR_BIT) >> 2;
8834 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
8835 inst.instruction |= (flags & 0xff);
8836 inst.instruction |= inst.operands[1].reg << 16;
8842 if (!inst.operands[2].present)
8843 inst.operands[2].reg = inst.operands[0].reg;
8845 /* There is no 32-bit MULS and no 16-bit MUL. */
8846 if (unified_syntax && inst.instruction == T_MNEM_mul)
8848 inst.instruction = THUMB_OP32 (inst.instruction);
8849 inst.instruction |= inst.operands[0].reg << 8;
8850 inst.instruction |= inst.operands[1].reg << 16;
8851 inst.instruction |= inst.operands[2].reg << 0;
8855 constraint (!unified_syntax
8856 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
8857 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8860 inst.instruction = THUMB_OP16 (inst.instruction);
8861 inst.instruction |= inst.operands[0].reg;
8863 if (inst.operands[0].reg == inst.operands[1].reg)
8864 inst.instruction |= inst.operands[2].reg << 3;
8865 else if (inst.operands[0].reg == inst.operands[2].reg)
8866 inst.instruction |= inst.operands[1].reg << 3;
8868 constraint (1, _("dest must overlap one source register"));
8875 inst.instruction |= inst.operands[0].reg << 12;
8876 inst.instruction |= inst.operands[1].reg << 8;
8877 inst.instruction |= inst.operands[2].reg << 16;
8878 inst.instruction |= inst.operands[3].reg;
8880 if (inst.operands[0].reg == inst.operands[1].reg)
8881 as_tsktsk (_("rdhi and rdlo must be different"));
8889 if (inst.size_req == 4 || inst.operands[0].imm > 15)
8891 inst.instruction = THUMB_OP32 (inst.instruction);
8892 inst.instruction |= inst.operands[0].imm;
8896 inst.instruction = THUMB_OP16 (inst.instruction);
8897 inst.instruction |= inst.operands[0].imm << 4;
8902 constraint (inst.operands[0].present,
8903 _("Thumb does not support NOP with hints"));
8904 inst.instruction = 0x46c0;
8915 if (THUMB_SETS_FLAGS (inst.instruction))
8916 narrow = (current_it_mask == 0);
8918 narrow = (current_it_mask != 0);
8919 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8921 if (inst.size_req == 4)
8926 inst.instruction = THUMB_OP32 (inst.instruction);
8927 inst.instruction |= inst.operands[0].reg << 8;
8928 inst.instruction |= inst.operands[1].reg << 16;
8932 inst.instruction = THUMB_OP16 (inst.instruction);
8933 inst.instruction |= inst.operands[0].reg;
8934 inst.instruction |= inst.operands[1].reg << 3;
8939 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8941 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8943 inst.instruction = THUMB_OP16 (inst.instruction);
8944 inst.instruction |= inst.operands[0].reg;
8945 inst.instruction |= inst.operands[1].reg << 3;
8952 inst.instruction |= inst.operands[0].reg << 8;
8953 inst.instruction |= inst.operands[1].reg << 16;
8954 inst.instruction |= inst.operands[2].reg;
8955 if (inst.operands[3].present)
8957 unsigned int val = inst.reloc.exp.X_add_number;
8958 constraint (inst.reloc.exp.X_op != O_constant,
8959 _("expression too complex"));
8960 inst.instruction |= (val & 0x1c) << 10;
8961 inst.instruction |= (val & 0x03) << 6;
8968 if (!inst.operands[3].present)
8969 inst.instruction &= ~0x00000020;
8976 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
8980 do_t_push_pop (void)
8984 constraint (inst.operands[0].writeback,
8985 _("push/pop do not support {reglist}^"));
8986 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8987 _("expression too complex"));
8989 mask = inst.operands[0].imm;
8990 if ((mask & ~0xff) == 0)
8991 inst.instruction = THUMB_OP16 (inst.instruction);
8992 else if ((inst.instruction == T_MNEM_push
8993 && (mask & ~0xff) == 1 << REG_LR)
8994 || (inst.instruction == T_MNEM_pop
8995 && (mask & ~0xff) == 1 << REG_PC))
8997 inst.instruction = THUMB_OP16 (inst.instruction);
8998 inst.instruction |= THUMB_PP_PC_LR;
9001 else if (unified_syntax)
9003 if (mask & (1 << 13))
9004 inst.error = _("SP not allowed in register list");
9005 if (inst.instruction == T_MNEM_push)
9007 if (mask & (1 << 15))
9008 inst.error = _("PC not allowed in register list");
9012 if (mask & (1 << 14)
9013 && mask & (1 << 15))
9014 inst.error = _("LR and PC should not both be in register list");
9016 if ((mask & (mask - 1)) == 0)
9018 /* Single register push/pop implemented as str/ldr. */
9019 if (inst.instruction == T_MNEM_push)
9020 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9022 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
9023 mask = ffs (mask) - 1;
9027 inst.instruction = THUMB_OP32 (inst.instruction);
9031 inst.error = _("invalid register list to push/pop instruction");
9035 inst.instruction |= mask;
9041 inst.instruction |= inst.operands[0].reg << 8;
9042 inst.instruction |= inst.operands[1].reg << 16;
9048 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9049 && inst.size_req != 4)
9051 inst.instruction = THUMB_OP16 (inst.instruction);
9052 inst.instruction |= inst.operands[0].reg;
9053 inst.instruction |= inst.operands[1].reg << 3;
9055 else if (unified_syntax)
9057 inst.instruction = THUMB_OP32 (inst.instruction);
9058 inst.instruction |= inst.operands[0].reg << 8;
9059 inst.instruction |= inst.operands[1].reg << 16;
9060 inst.instruction |= inst.operands[1].reg;
9063 inst.error = BAD_HIREG;
9071 Rd = inst.operands[0].reg;
9072 Rs = (inst.operands[1].present
9073 ? inst.operands[1].reg /* Rd, Rs, foo */
9074 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9076 inst.instruction |= Rd << 8;
9077 inst.instruction |= Rs << 16;
9078 if (!inst.operands[2].isreg)
9080 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9081 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9084 encode_thumb32_shifted_operand (2);
9090 constraint (current_it_mask, BAD_NOT_IT);
9091 if (inst.operands[0].imm)
9092 inst.instruction |= 0x8;
9098 if (!inst.operands[1].present)
9099 inst.operands[1].reg = inst.operands[0].reg;
9106 switch (inst.instruction)
9109 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9111 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9113 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9115 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9119 if (THUMB_SETS_FLAGS (inst.instruction))
9120 narrow = (current_it_mask == 0);
9122 narrow = (current_it_mask != 0);
9123 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9125 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9127 if (inst.operands[2].isreg
9128 && (inst.operands[1].reg != inst.operands[0].reg
9129 || inst.operands[2].reg > 7))
9131 if (inst.size_req == 4)
9136 if (inst.operands[2].isreg)
9138 inst.instruction = THUMB_OP32 (inst.instruction);
9139 inst.instruction |= inst.operands[0].reg << 8;
9140 inst.instruction |= inst.operands[1].reg << 16;
9141 inst.instruction |= inst.operands[2].reg;
9145 inst.operands[1].shifted = 1;
9146 inst.operands[1].shift_kind = shift_kind;
9147 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9148 ? T_MNEM_movs : T_MNEM_mov);
9149 inst.instruction |= inst.operands[0].reg << 8;
9150 encode_thumb32_shifted_operand (1);
9151 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9152 inst.reloc.type = BFD_RELOC_UNUSED;
9157 if (inst.operands[2].isreg)
9161 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
9162 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
9163 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
9164 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
9168 inst.instruction |= inst.operands[0].reg;
9169 inst.instruction |= inst.operands[2].reg << 3;
9175 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9176 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9177 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9180 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9181 inst.instruction |= inst.operands[0].reg;
9182 inst.instruction |= inst.operands[1].reg << 3;
9188 constraint (inst.operands[0].reg > 7
9189 || inst.operands[1].reg > 7, BAD_HIREG);
9190 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9192 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
9194 constraint (inst.operands[2].reg > 7, BAD_HIREG);
9195 constraint (inst.operands[0].reg != inst.operands[1].reg,
9196 _("source1 and dest must be same register"));
9198 switch (inst.instruction)
9200 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
9201 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
9202 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
9203 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
9207 inst.instruction |= inst.operands[0].reg;
9208 inst.instruction |= inst.operands[2].reg << 3;
9212 switch (inst.instruction)
9214 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
9215 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
9216 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
9217 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
9220 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9221 inst.instruction |= inst.operands[0].reg;
9222 inst.instruction |= inst.operands[1].reg << 3;
9230 inst.instruction |= inst.operands[0].reg << 8;
9231 inst.instruction |= inst.operands[1].reg << 16;
9232 inst.instruction |= inst.operands[2].reg;
9238 unsigned int value = inst.reloc.exp.X_add_number;
9239 constraint (inst.reloc.exp.X_op != O_constant,
9240 _("expression too complex"));
9241 inst.reloc.type = BFD_RELOC_UNUSED;
9242 inst.instruction |= (value & 0xf000) >> 12;
9243 inst.instruction |= (value & 0x0ff0);
9244 inst.instruction |= (value & 0x000f) << 16;
9250 inst.instruction |= inst.operands[0].reg << 8;
9251 inst.instruction |= inst.operands[1].imm - 1;
9252 inst.instruction |= inst.operands[2].reg << 16;
9254 if (inst.operands[3].present)
9256 constraint (inst.reloc.exp.X_op != O_constant,
9257 _("expression too complex"));
9259 if (inst.reloc.exp.X_add_number != 0)
9261 if (inst.operands[3].shift_kind == SHIFT_ASR)
9262 inst.instruction |= 0x00200000; /* sh bit */
9263 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9264 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9266 inst.reloc.type = BFD_RELOC_UNUSED;
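/* Worked example (illustrative assumption): "ssat r0, #8, r1, asr #2"
   puts Rd = 0 in bits 8-11, the saturation position minus one (7) in the
   low five bits and Rn = 1 in bits 16-19; the asr shift sets the sh bit
   and the shift amount 2 is split into the same imm3:imm2 fields used by
   the bit-field instructions above.  */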
9273 inst.instruction |= inst.operands[0].reg << 8;
9274 inst.instruction |= inst.operands[1].imm - 1;
9275 inst.instruction |= inst.operands[2].reg << 16;
9281 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9282 || inst.operands[2].postind || inst.operands[2].writeback
9283 || inst.operands[2].immisreg || inst.operands[2].shifted
9284 || inst.operands[2].negative,
9287 inst.instruction |= inst.operands[0].reg << 8;
9288 inst.instruction |= inst.operands[1].reg << 12;
9289 inst.instruction |= inst.operands[2].reg << 16;
9290 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9296 if (!inst.operands[2].present)
9297 inst.operands[2].reg = inst.operands[1].reg + 1;
9299 constraint (inst.operands[0].reg == inst.operands[1].reg
9300 || inst.operands[0].reg == inst.operands[2].reg
9301 || inst.operands[0].reg == inst.operands[3].reg
9302 || inst.operands[1].reg == inst.operands[2].reg,
9305 inst.instruction |= inst.operands[0].reg;
9306 inst.instruction |= inst.operands[1].reg << 12;
9307 inst.instruction |= inst.operands[2].reg << 8;
9308 inst.instruction |= inst.operands[3].reg << 16;
9314 inst.instruction |= inst.operands[0].reg << 8;
9315 inst.instruction |= inst.operands[1].reg << 16;
9316 inst.instruction |= inst.operands[2].reg;
9317 inst.instruction |= inst.operands[3].imm << 4;
9323 if (inst.instruction <= 0xffff && inst.size_req != 4
9324 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9325 && (!inst.operands[2].present || inst.operands[2].imm == 0))
9327 inst.instruction = THUMB_OP16 (inst.instruction);
9328 inst.instruction |= inst.operands[0].reg;
9329 inst.instruction |= inst.operands[1].reg << 3;
9331 else if (unified_syntax)
9333 if (inst.instruction <= 0xffff)
9334 inst.instruction = THUMB_OP32 (inst.instruction);
9335 inst.instruction |= inst.operands[0].reg << 8;
9336 inst.instruction |= inst.operands[1].reg;
9337 inst.instruction |= inst.operands[2].imm << 4;
9341 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
9342 _("Thumb encoding does not support rotation"));
9343 constraint (1, BAD_HIREG);
9350 inst.reloc.type = BFD_RELOC_ARM_SWI;
9358 half = (inst.instruction & 0x10) != 0;
9359 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9360 constraint (inst.operands[0].immisreg,
9361 _("instruction requires register index"));
9362 constraint (inst.operands[0].imm == 15,
9363 _("PC is not a valid index register"));
9364 constraint (!half && inst.operands[0].shifted,
9365 _("instruction does not allow shifted index"));
9366 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
9372 inst.instruction |= inst.operands[0].reg << 8;
9373 inst.instruction |= inst.operands[1].imm;
9374 inst.instruction |= inst.operands[2].reg << 16;
9376 if (inst.operands[3].present)
9378 constraint (inst.reloc.exp.X_op != O_constant,
9379 _("expression too complex"));
9380 if (inst.reloc.exp.X_add_number != 0)
9382 if (inst.operands[3].shift_kind == SHIFT_ASR)
9383 inst.instruction |= 0x00200000; /* sh bit */
9385 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9386 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9388 inst.reloc.type = BFD_RELOC_UNUSED;
9395 inst.instruction |= inst.operands[0].reg << 8;
9396 inst.instruction |= inst.operands[1].imm;
9397 inst.instruction |= inst.operands[2].reg << 16;
9400 /* Neon instruction encoder helpers. */
9402 /* Encodings for the different types for various Neon opcodes. */
9404 /* An "invalid" code for the following tables. */
9407 struct neon_tab_entry
9410 unsigned float_or_poly;
9411 unsigned scalar_or_imm;
9414 /* Map overloaded Neon opcodes to their respective encodings. */
9415 #define NEON_ENC_TAB \
9416 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9417 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9418 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9419 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9420 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9421 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9422 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9423 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9424 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9425 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9426 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9427 /* Register variants of the following two instructions are encoded as
9428 vcge / vcgt with the operands reversed. */ \
9429 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9430 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9431 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9432 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9433 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9434 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9435 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9436 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9437 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9438 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9439 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9440 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9441 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9442 X(vshl, 0x0000400, N_INV, 0x0800510), \
9443 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9444 X(vand, 0x0000110, N_INV, 0x0800030), \
9445 X(vbic, 0x0100110, N_INV, 0x0800030), \
9446 X(veor, 0x1000110, N_INV, N_INV), \
9447 X(vorn, 0x0300110, N_INV, 0x0800010), \
9448 X(vorr, 0x0200110, N_INV, 0x0800010), \
9449 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9450 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9451 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9452 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9453 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9454 X(vst1, 0x0000000, 0x0800000, N_INV), \
9455 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9456 X(vst2, 0x0000100, 0x0800100, N_INV), \
9457 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9458 X(vst3, 0x0000200, 0x0800200, N_INV), \
9459 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9460 X(vst4, 0x0000300, 0x0800300, N_INV), \
9461 X(vmovn, 0x1b20200, N_INV, N_INV), \
9462 X(vtrn, 0x1b20080, N_INV, N_INV), \
9463 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9464 X(vqmovun, 0x1b20240, N_INV, N_INV)
9468 #define X(OPC,I,F,S) N_MNEM_##OPC
9473 static const struct neon_tab_entry neon_enc_tab[] =
9475 #define X(OPC,I,F,S) { (I), (F), (S) }
9480 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9481 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9482 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9483 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9484 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9485 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9486 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9487 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9488 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
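/* Reading note (an assumption drawn from the macros above, not original
   text): the table is indexed by the mnemonic's N_MNEM_* value once the
   & 0x0fffffff has stripped any tag bits, so for an entry such as
   X(vadd, 0x0000800, 0x0000d00, N_INV) the NEON_ENC_INTEGER accessor
   yields the integer encoding 0x0000800 and NEON_ENC_FLOAT the
   float_or_poly encoding 0x0000d00.  */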
9490 /* Shapes for instruction operands. Some (e.g. NS_DDD_QQQ) represent multiple
9491 shapes which an instruction can accept. The following mnemonic characters
9492 are used in the tag names for this enumeration:
9494 D - Neon D<n> register
9495 Q - Neon Q<n> register
9499 L - D<n> register list
9540 /* Bit masks used in type checking given instructions.
9541 'N_EQK' means the type must be the same as (or based on in some way) the key
9542 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9543 set, various other bits can be set as well in order to modify the meaning of
9544 the type constraint. */
9567 N_KEY = 0x080000, /* key element (main type specifier). */
9568 N_EQK = 0x100000, /* given operand has the same type & size as the key. */
9569 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
9570 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
9571 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
9572 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9573 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
9574 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
9575 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9577 N_MAX_NONSPECIAL = N_F32
9580 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9582 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9583 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9584 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9585 #define N_SUF_32 (N_SU_32 | N_F32)
9586 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9587 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
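/* Usage sketch (illustrative, based on the encoders further down rather
   than on original text): these masks are handed to neon_check_type, e.g.
   neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY) says the third
   operand is the key and must be one of S8/S16/S32/U8/U16/U32, while the
   two N_EQK operands must match whatever type the key resolves to.  */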
9589 /* Pass this as the first type argument to neon_check_type to ignore types
9591 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9593 /* Check the shape of a Neon instruction (sizes of registers). Returns the more
9594 specific shape when there are two alternatives. For non-polymorphic shapes,
9595 checking is done during operand parsing, so is not implemented here. */
9597 static enum neon_shape
9598 neon_check_shape (enum neon_shape req)
9600 #define RR(X) (inst.operands[(X)].isreg)
9601 #define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
9602 #define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
9603 #define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
9604 #define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)
9606 /* Fix missing optional operands. FIXME: we don't know at this point how
9607 many arguments we should have, so this makes the assumption that we have
9608 > 1. This is true of all current Neon opcodes, I think, but may not be
9609 true in the future. */
9610 if (!inst.operands[1].present)
9611 inst.operands[1] = inst.operands[0];
9617 if (RD(0) && RD(1) && RD(2))
9619 else if (RQ(0) && RQ(1) && RQ(2))
9622 first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
9629 if (RD(0) && RD(1) && IM(2))
9631 else if (RQ(0) && RQ(1) && IM(2))
9634 first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
9641 if (RD(0) && RD(1) && RD(2) && IM(3))
9643 if (RQ(0) && RQ(1) && RQ(2) && IM(3))
9646 first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
9647 "<Dd>, <Dn>, <Dm>, #<imm> operands"));
9653 if (RD(0) && RD(1) && SC(2))
9655 else if (RQ(0) && RQ(1) && SC(2))
9658 first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
9667 else if (RQ(0) && RQ(1))
9670 first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));
9678 else if (RQ(0) && SC(1))
9681 first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));
9689 else if (RQ(0) && RR(1))
9692 first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));
9700 else if (RQ(0) && IM(1))
9703 first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
9720 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
9723 /* Allow modification to be made to types which are constrained to be
9724 based on the key element, based on bits set alongside N_EQK. */
9725 if ((typebits & N_EQK) != 0)
9727 if ((typebits & N_HLF) != 0)
9729 else if ((typebits & N_DBL) != 0)
9731 if ((typebits & N_SGN) != 0)
9732 *g_type = NT_signed;
9733 else if ((typebits & N_UNS) != 0)
9734 *g_type = NT_unsigned;
9735 else if ((typebits & N_INT) != 0)
9736 *g_type = NT_integer;
9737 else if ((typebits & N_FLT) != 0)
9739 else if ((typebits & N_SIZ) != 0)
9740 *g_type = NT_untyped;
9744 /* Return a copy of KEY promoted by bits set in THISARG. KEY should be the "key"
9745 operand type, i.e. the single type specified in a Neon instruction when it
9746 is the only one given. */
9748 static struct neon_type_el
9749 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
9751 struct neon_type_el dest = *key;
9753 assert ((thisarg & N_EQK) != 0);
9755 neon_modify_type_size (thisarg, &dest.type, &dest.size);
9760 /* Convert Neon type and size into compact bitmask representation. */
9762 static enum neon_type_mask
9763 type_chk_of_el_type (enum neon_el_type type, unsigned size)
9771 case 16: return N_16;
9772 case 32: return N_32;
9773 case 64: return N_64;
9781 case 8: return N_I8;
9782 case 16: return N_I16;
9783 case 32: return N_I32;
9784 case 64: return N_I64;
9797 case 8: return N_P8;
9798 case 16: return N_P16;
9806 case 8: return N_S8;
9807 case 16: return N_S16;
9808 case 32: return N_S32;
9809 case 64: return N_S64;
9817 case 8: return N_U8;
9818 case 16: return N_U16;
9819 case 32: return N_U32;
9820 case 64: return N_U64;
9831 /* Convert compact Neon bitmask type representation to a type and size. Only
9832 handles the case where a single bit is set in the mask. */
9835 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
9836 enum neon_type_mask mask)
9838 if ((mask & N_EQK) != 0)
9841 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
9843 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
9845 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
9847 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64)) != 0)
9852 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
9854 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
9855 *type = NT_unsigned;
9856 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
9858 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
9860 else if ((mask & (N_P8 | N_P16)) != 0)
9862 else if ((mask & N_F32) != 0)
9870 /* Modify a bitmask of allowed types. This is only needed for type
9874 modify_types_allowed (unsigned allowed, unsigned mods)
9877 enum neon_el_type type;
9883 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
9885 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
9887 neon_modify_type_size (mods, &type, &size);
9888 destmask |= type_chk_of_el_type (type, size);
9895 /* Check type and return type classification.
9896 The manual states (paraphrase): If one datatype is given, it indicates the
9898 - the second operand, if there is one
9899 - the operand, if there is no second operand
9900 - the result, if there are no operands.
9901 This isn't quite good enough though, so we use a concept of a "key" datatype
9902 which is set on a per-instruction basis, which is the one which matters when
9903 only one data type is written.
9904 Note: this function has side-effects (e.g. filling in missing operands). All
9905 Neon instructions should call it before performing bit encoding.
9908 static struct neon_type_el
9909 neon_check_type (unsigned els, enum neon_shape ns, ...)
9912 unsigned i, pass, key_el = 0;
9913 unsigned types[NEON_MAX_TYPE_ELS];
9914 enum neon_el_type k_type = NT_invtype;
9915 unsigned k_size = -1u;
9916 struct neon_type_el badtype = {NT_invtype, -1};
9917 unsigned key_allowed = 0;
9919 /* Optional registers in Neon instructions are always (not) in operand 1.
9920 Fill in the missing operand here, if it was omitted. */
9921 if (els > 1 && !inst.operands[1].present)
9922 inst.operands[1] = inst.operands[0];
9924 /* Suck up all the varargs. */
9926 for (i = 0; i < els; i++)
9928 unsigned thisarg = va_arg (ap, unsigned);
9929 if (thisarg == N_IGNORE_TYPE)
9935 if ((thisarg & N_KEY) != 0)
9940 if (inst.vectype.elems > 0)
9941 for (i = 0; i < els; i++)
9942 if (inst.operands[i].vectype.type != NT_invtype)
9944 first_error (_("types specified in both the mnemonic and operands"));
9948 /* Duplicate inst.vectype elements here as necessary.
9949 FIXME: No idea if this is exactly the same as the ARM assembler,
9950 particularly when an insn takes one register and one non-register
9952 if (inst.vectype.elems == 1 && els > 1)
9955 inst.vectype.elems = els;
9956 inst.vectype.el[key_el] = inst.vectype.el[0];
9957 for (j = 0; j < els; j++)
9959 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
9962 else if (inst.vectype.elems == 0 && els > 0)
9965 /* No types were given after the mnemonic, so look for types specified
9966 after each operand. We allow some flexibility here; as long as the
9967 "key" operand has a type, we can infer the others. */
9968 for (j = 0; j < els; j++)
9969 if (inst.operands[j].vectype.type != NT_invtype)
9970 inst.vectype.el[j] = inst.operands[j].vectype;
9972 if (inst.operands[key_el].vectype.type != NT_invtype)
9974 for (j = 0; j < els; j++)
9975 if (inst.operands[j].vectype.type == NT_invtype)
9976 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
9981 first_error (_("operand types can't be inferred"));
9985 else if (inst.vectype.elems != els)
9987 first_error (_("type specifier has the wrong number of parts"));
9991 for (pass = 0; pass < 2; pass++)
9993 for (i = 0; i < els; i++)
9995 unsigned thisarg = types[i];
9996 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
9997 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
9998 enum neon_el_type g_type = inst.vectype.el[i].type;
9999 unsigned g_size = inst.vectype.el[i].size;
10001 /* Decay more-specific signed & unsigned types to sign-insensitive
10002 integer types if sign-specific variants are unavailable. */
10003 if ((g_type == NT_signed || g_type == NT_unsigned)
10004 && (types_allowed & N_SU_ALL) == 0)
10005 g_type = NT_integer;
10007 /* If only untyped args are allowed, decay any more specific types to
10008 them. Some instructions only care about signs for some element
10009 sizes, so handle that properly. */
10010 if ((g_size == 8 && (types_allowed & N_8) != 0)
10011 || (g_size == 16 && (types_allowed & N_16) != 0)
10012 || (g_size == 32 && (types_allowed & N_32) != 0)
10013 || (g_size == 64 && (types_allowed & N_64) != 0))
10014 g_type = NT_untyped;
10018 if ((thisarg & N_KEY) != 0)
10022 key_allowed = thisarg & ~N_KEY;
10027 if ((thisarg & N_EQK) == 0)
10029 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10031 if ((given_type & types_allowed) == 0)
10033 first_error (_("bad type in Neon instruction"));
10039 enum neon_el_type mod_k_type = k_type;
10040 unsigned mod_k_size = k_size;
10041 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10042 if (g_type != mod_k_type || g_size != mod_k_size)
10044 first_error (_("inconsistent types in Neon instruction"));
10052 return inst.vectype.el[key_el];
10055 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10056 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
10059 neon_dp_fixup (unsigned i)
10063 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10077 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
10081 neon_logbits (unsigned x)
10083 return ffs (x) - 4;
10086 #define LOW4(R) ((R) & 0xf)
10087 #define HI1(R) (((R) >> 4) & 1)
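/* For illustration (values derived from the definitions above, not original
   text): neon_logbits (8) == 0, neon_logbits (16) == 1, neon_logbits (32)
   == 2 and neon_logbits (64) == 3, since ffs (8) is 4.  Likewise a register
   number such as 17 (d17) splits into LOW4 (17) == 1 for the four-bit
   register field and HI1 (17) == 1 for the separate D/N/M high bit.  */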
10089 /* Encode insns with bit pattern:
10091 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10092 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10094 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10095 different meaning for some instruction. */
10098 neon_three_same (int isquad, int ubit, int size)
10100 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10101 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10102 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10103 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10104 inst.instruction |= LOW4 (inst.operands[2].reg);
10105 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10106 inst.instruction |= (isquad != 0) << 6;
10107 inst.instruction |= (ubit != 0) << 24;
10109 inst.instruction |= neon_logbits (size) << 20;
10111 inst.instruction = neon_dp_fixup (inst.instruction);
10114 /* Encode instructions of the form:
10116 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10117 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10119 Don't write size if SIZE == -1. */
10122 neon_two_same (int qbit, int ubit, int size)
10124 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10125 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10126 inst.instruction |= LOW4 (inst.operands[1].reg);
10127 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10128 inst.instruction |= (qbit != 0) << 6;
10129 inst.instruction |= (ubit != 0) << 24;
10132 inst.instruction |= neon_logbits (size) << 18;
10134 inst.instruction = neon_dp_fixup (inst.instruction);
10137 /* Neon instruction encoders, in approximate order of appearance. */
10140 do_neon_dyadic_i_su (void)
10142 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10143 struct neon_type_el et = neon_check_type (3, rs,
10144 N_EQK, N_EQK, N_SU_32 | N_KEY);
10145 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10149 do_neon_dyadic_i64_su (void)
10151 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10152 struct neon_type_el et = neon_check_type (3, rs,
10153 N_EQK, N_EQK, N_SU_ALL | N_KEY);
10154 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10158 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
10161 unsigned size = et.size >> 3;
10162 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10163 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10164 inst.instruction |= LOW4 (inst.operands[1].reg);
10165 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10166 inst.instruction |= (isquad != 0) << 6;
10167 inst.instruction |= immbits << 16;
10168 inst.instruction |= (size >> 3) << 7;
10169 inst.instruction |= (size & 0x7) << 19;
10171 inst.instruction |= (uval != 0) << 24;
10173 inst.instruction = neon_dp_fixup (inst.instruction);
10177 do_neon_shl_imm (void)
10179 if (!inst.operands[2].isreg)
10181 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10182 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
10183 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10184 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, inst.operands[2].imm);
10188 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10189 struct neon_type_el et = neon_check_type (3, rs,
10190 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10191 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10192 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10197 do_neon_qshl_imm (void)
10199 if (!inst.operands[2].isreg)
10201 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10202 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
10203 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10204 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
10205 inst.operands[2].imm);
10209 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10210 struct neon_type_el et = neon_check_type (3, rs,
10211 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10212 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10213 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10218 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
10220 /* Handle .I8 and .I64 as pseudo-instructions. */
10224 /* Unfortunately, this will make everything apart from zero out-of-range.
10225 FIXME is this the intended semantics? There doesn't seem much point in
10226 accepting .I8 if so. */
10227 immediate |= immediate << 8;
10231 /* Similarly, anything other than zero will be replicated in bits [63:32],
10232 which probably isn't what we want if we specified .I64.  */
10233 if (immediate != 0)
10234 goto bad_immediate;
10240 if (immediate == (immediate & 0x000000ff))
10242 *immbits = immediate;
10243 return (size == 16) ? 0x9 : 0x1;
10245 else if (immediate == (immediate & 0x0000ff00))
10247 *immbits = immediate >> 8;
10248 return (size == 16) ? 0xb : 0x3;
10250 else if (immediate == (immediate & 0x00ff0000))
10252 *immbits = immediate >> 16;
10255 else if (immediate == (immediate & 0xff000000))
10257 *immbits = immediate >> 24;
10262 first_error (_("immediate value out of range"));
10266 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
10270 neon_bits_same_in_bytes (unsigned imm)
10272 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
10273 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
10274 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
10275 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
10278 /* For immediate of above form, return 0bABCD. */
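/* For example, 0x00ff00ff squashes to 0b0101: each 0xff byte contributes a 1
   and each 0x00 byte contributes a 0, from the low byte (D) upwards.  */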
10281 neon_squash_bits (unsigned imm)
10283 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
10284 | ((imm & 0x01000000) >> 21);
10287 /* Compress quarter-float representation to 0b...000 abcdefgh. */
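/* For example, 1.0f (0x3f800000) compresses to 0x70: the sign bit moves down
   to bit 7 and bits [25:19] of the single-precision value provide bits [6:0].  */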
10290 neon_qfloat_bits (unsigned imm)
10292 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
10295 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10296 the instruction. *OP is passed as the initial value of the op field, and
10297 may be set to a different value depending on the constant (i.e.
10298 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10302 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
10303 int *op, int size, enum neon_el_type type)
10305 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
10307 if (size != 32 || *op == 1)
10309 *immbits = neon_qfloat_bits (immlo);
10312 else if (size == 64 && neon_bits_same_in_bytes (immhi)
10313 && neon_bits_same_in_bytes (immlo))
10315 /* Check this one first so we don't have to bother with immhi in later tests.  */
10319 *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
10323 else if (immhi != 0)
10325 else if (immlo == (immlo & 0x000000ff))
10327 /* 64-bit case was already handled. Don't allow MVN with 8-bit immediates.  */
10329 if ((size != 8 && size != 16 && size != 32)
10330 || (size == 8 && *op == 1))
10333 return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
10335 else if (immlo == (immlo & 0x0000ff00))
10337 if (size != 16 && size != 32)
10339 *immbits = immlo >> 8;
10340 return (size == 16) ? 0xa : 0x2;
10342 else if (immlo == (immlo & 0x00ff0000))
10346 *immbits = immlo >> 16;
10349 else if (immlo == (immlo & 0xff000000))
10353 *immbits = immlo >> 24;
10356 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
10360 *immbits = (immlo >> 8) & 0xff;
10363 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
10367 *immbits = (immlo >> 16) & 0xff;
10374 /* Write immediate bits [7:0] to the following locations:
10376 |28/24|23 19|18 16|15 4|3 0|
10377 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10379 This function is used by VMOV/VMVN/VORR/VBIC. */
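/* For example, immbits == 0xab (binary 1010 1011) is written as a == 1,
   bcd == 0b010 and efgh == 0b1011 in the fields shown above.  */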
10382 neon_write_immbits (unsigned immbits)
10384 inst.instruction |= immbits & 0xf;
10385 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
10386 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
10389 /* Invert low-order SIZE bits of XHI:XLO. */
10392 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
10394 unsigned immlo = xlo ? *xlo : 0;
10395 unsigned immhi = xhi ? *xhi : 0;
10400 immlo = (~immlo) & 0xff;
10404 immlo = (~immlo) & 0xffff;
10408 immhi = (~immhi) & 0xffffffff;
10409 /* fall through. */
10412 immlo = (~immlo) & 0xffffffff;
10427 do_neon_logic (void)
10429 if (inst.operands[2].present && inst.operands[2].isreg)
10431 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10432 neon_check_type (3, rs, N_IGNORE_TYPE);
10433 /* U bit and size field were set as part of the bitmask. */
10434 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10435 neon_three_same (rs == NS_QQQ, 0, -1);
10439 enum neon_shape rs = neon_check_shape (NS_DI_QI);
10440 struct neon_type_el et = neon_check_type (1, rs, N_I8 | N_I16 | N_I32
10442 enum neon_opc opcode = inst.instruction & 0x0fffffff;
10446 if (et.type == NT_invtype)
10449 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10454 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10459 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10464 /* Pseudo-instruction for VBIC. */
10465 immbits = inst.operands[1].imm;
10466 neon_invert_size (&immbits, 0, et.size);
10467 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10471 /* Pseudo-instruction for VORR. */
10472 immbits = inst.operands[1].imm;
10473 neon_invert_size (&immbits, 0, et.size);
10474 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10484 inst.instruction |= (rs == NS_QI) << 6;
10485 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10486 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10487 inst.instruction |= cmode << 8;
10488 neon_write_immbits (immbits);
10490 inst.instruction = neon_dp_fixup (inst.instruction);
10495 do_neon_bitfield (void)
10497 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10498 neon_check_type (3, rs, N_IGNORE_TYPE);
10499 neon_three_same (rs == NS_QQQ, 0, -1);
10503 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
10506 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10507 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
10509 if (et.type == NT_float)
10511 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
10512 neon_three_same (rs == NS_QQQ, 0, -1);
10516 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10517 neon_three_same (rs == NS_QQQ, et.type == ubit_meaning, et.size);
10522 do_neon_dyadic_if_su (void)
10524 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10528 do_neon_dyadic_if_su_d (void)
10530 /* This version only allows D registers, but that constraint is enforced during
10531 operand parsing so we don't need to do anything extra here. */
10532 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10536 do_neon_dyadic_if_i (void)
10538 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10542 do_neon_dyadic_if_i_d (void)
10544 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10548 do_neon_addsub_if_i (void)
10550 /* The "untyped" case can't happen. Do this to stop the "U" bit being
10551 affected if we specify unsigned args. */
10552 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
10555 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
10557 V<op> A,B (A is operand 0, B is operand 2)
10562 so handle that case specially. */
10565 neon_exchange_operands (void)
10567 void *scratch = alloca (sizeof (inst.operands[0]));
10568 if (inst.operands[1].present)
10570 /* Swap operands[1] and operands[2]. */
10571 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
10572 inst.operands[1] = inst.operands[2];
10573 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
10577 inst.operands[1] = inst.operands[2];
10578 inst.operands[2] = inst.operands[0];
10583 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
10585 if (inst.operands[2].isreg)
10588 neon_exchange_operands ();
10589 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
10593 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10594 struct neon_type_el et = neon_check_type (2, rs,
10595 N_EQK | N_SIZ, immtypes | N_KEY);
10597 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10598 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10599 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10600 inst.instruction |= LOW4 (inst.operands[1].reg);
10601 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10602 inst.instruction |= (rs == NS_QQI) << 6;
10603 inst.instruction |= (et.type == NT_float) << 10;
10604 inst.instruction |= neon_logbits (et.size) << 18;
10606 inst.instruction = neon_dp_fixup (inst.instruction);
10613 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
10617 do_neon_cmp_inv (void)
10619 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
10625 neon_compare (N_IF_32, N_IF_32, FALSE);
10628 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
10629 scalars, which are encoded in 5 bits, M : Rm.
10630 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
10631 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the index in M.  */
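/* For example, with 16-bit elements the scalar d3[2] yields
   neon_scalar_for_mul () == (3 | (2 << 3)) == 0x13, i.e. Rm[2:0] == 3 and
   M:Rm[3] == 0b10.  */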
10635 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
10637 unsigned regno = NEON_SCALAR_REG (scalar);
10638 unsigned elno = NEON_SCALAR_INDEX (scalar);
10643 if (regno > 7 || elno > 3)
10645 return regno | (elno << 3);
10648 if (regno > 15 || elno > 1)
10650 return regno | (elno << 4);
10654 first_error (_("scalar out of range for multiply instruction"));
10660 /* Encode multiply / multiply-accumulate scalar instructions. */
10663 neon_mul_mac (struct neon_type_el et, int ubit)
10667 /* Give a more helpful error message if we have an invalid type. */
10668 if (et.type == NT_invtype)
10671 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
10672 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10673 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10674 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10675 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10676 inst.instruction |= LOW4 (scalar);
10677 inst.instruction |= HI1 (scalar) << 5;
10678 inst.instruction |= (et.type == NT_float) << 8;
10679 inst.instruction |= neon_logbits (et.size) << 20;
10680 inst.instruction |= (ubit != 0) << 24;
10682 inst.instruction = neon_dp_fixup (inst.instruction);
10686 do_neon_mac_maybe_scalar (void)
10688 if (inst.operands[2].isscalar)
10690 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10691 struct neon_type_el et = neon_check_type (3, rs,
10692 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
10693 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10694 neon_mul_mac (et, rs == NS_QQS);
10697 do_neon_dyadic_if_i ();
10703 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10704 struct neon_type_el et = neon_check_type (3, rs,
10705 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
10706 neon_three_same (rs == NS_QQQ, 0, et.size);
10709 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10710 same types as the MAC equivalents. The polynomial type for this instruction
10711 is encoded the same as the integer type. */
10716 if (inst.operands[2].isscalar)
10717 do_neon_mac_maybe_scalar ();
10719 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
10723 do_neon_qdmulh (void)
10725 if (inst.operands[2].isscalar)
10727 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10728 struct neon_type_el et = neon_check_type (3, rs,
10729 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10730 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10731 neon_mul_mac (et, rs == NS_QQS);
10735 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10736 struct neon_type_el et = neon_check_type (3, rs,
10737 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10738 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10739 /* The U bit (rounding) comes from bit mask. */
10740 neon_three_same (rs == NS_QQQ, 0, et.size);
10745 do_neon_fcmp_absolute (void)
10747 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10748 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10749 /* Size field comes from bit mask. */
10750 neon_three_same (rs == NS_QQQ, 1, -1);
10754 do_neon_fcmp_absolute_inv (void)
10756 neon_exchange_operands ();
10757 do_neon_fcmp_absolute ();
10761 do_neon_step (void)
10763 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10764 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10765 neon_three_same (rs == NS_QQQ, 0, -1);
10769 do_neon_abs_neg (void)
10771 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10772 struct neon_type_el et = neon_check_type (3, rs,
10773 N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
10774 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10775 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10776 inst.instruction |= LOW4 (inst.operands[1].reg);
10777 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10778 inst.instruction |= (rs == NS_QQ) << 6;
10779 inst.instruction |= (et.type == NT_float) << 10;
10780 inst.instruction |= neon_logbits (et.size) << 18;
10782 inst.instruction = neon_dp_fixup (inst.instruction);
10788 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10789 struct neon_type_el et = neon_check_type (2, rs,
10790 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10791 int imm = inst.operands[2].imm;
10792 constraint (imm < 0 || (unsigned)imm >= et.size,
10793 _("immediate out of range for insert"));
10794 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10800 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10801 struct neon_type_el et = neon_check_type (2, rs,
10802 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10803 int imm = inst.operands[2].imm;
10804 constraint (imm < 1 || (unsigned)imm > et.size,
10805 _("immediate out of range for insert"));
10806 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, et.size - imm);
10810 do_neon_qshlu_imm (void)
10812 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10813 struct neon_type_el et = neon_check_type (2, rs,
10814 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
10815 int imm = inst.operands[2].imm;
10816 constraint (imm < 0 || (unsigned)imm >= et.size,
10817 _("immediate out of range for shift"));
10818 /* Only encodes the 'U present' variant of the instruction.
10819 In this case, signed types have OP (bit 8) set to 0.
10820 Unsigned types have OP set to 1. */
10821 inst.instruction |= (et.type == NT_unsigned) << 8;
10822 /* The rest of the bits are the same as other immediate shifts. */
10823 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10827 do_neon_qmovn (void)
10829 struct neon_type_el et = neon_check_type (2, NS_DQ,
10830 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10831 /* Saturating move where operands can be signed or unsigned, and the
10832 destination has the same signedness. */
10833 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10834 if (et.type == NT_unsigned)
10835 inst.instruction |= 0xc0;
10837 inst.instruction |= 0x80;
10838 neon_two_same (0, 1, et.size / 2);
10842 do_neon_qmovun (void)
10844 struct neon_type_el et = neon_check_type (2, NS_DQ,
10845 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10846 /* Saturating move with unsigned results. Operands must be signed. */
10847 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10848 neon_two_same (0, 1, et.size / 2);
10852 do_neon_rshift_sat_narrow (void)
10854 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10855 or unsigned. If operands are unsigned, results must also be unsigned. */
10856 struct neon_type_el et = neon_check_type (2, NS_DQI,
10857 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10858 int imm = inst.operands[2].imm;
10859 /* This gets the bounds check, size encoding and immediate bits calculation
10863 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10864 VQMOVN.I<size> <Dd>, <Qm>. */
10867 inst.operands[2].present = 0;
10868 inst.instruction = N_MNEM_vqmovn;
10873 constraint (imm < 1 || (unsigned)imm > et.size,
10874 _("immediate out of range"));
10875 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
10879 do_neon_rshift_sat_narrow_u (void)
10881 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10882 or unsigned. If operands are unsigned, results must also be unsigned. */
10883 struct neon_type_el et = neon_check_type (2, NS_DQI,
10884 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10885 int imm = inst.operands[2].imm;
10886 /* This gets the bounds check, size encoding and immediate bits calculation
10890 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10891 VQMOVUN.I<size> <Dd>, <Qm>. */
10894 inst.operands[2].present = 0;
10895 inst.instruction = N_MNEM_vqmovun;
10900 constraint (imm < 1 || (unsigned)imm > et.size,
10901 _("immediate out of range"));
10902 /* FIXME: The manual is kind of unclear about what value U should have in
10903 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10905 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
10909 do_neon_movn (void)
10911 struct neon_type_el et = neon_check_type (2, NS_DQ,
10912 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10913 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10914 neon_two_same (0, 1, et.size / 2);
10918 do_neon_rshift_narrow (void)
10920 struct neon_type_el et = neon_check_type (2, NS_DQI,
10921 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10922 int imm = inst.operands[2].imm;
10923 /* This gets the bounds check, size encoding and immediate bits calculation
10927 /* If immediate is zero then we are a pseudo-instruction for
10928 VMOVN.I<size> <Dd>, <Qm> */
10931 inst.operands[2].present = 0;
10932 inst.instruction = N_MNEM_vmovn;
10937 constraint (imm < 1 || (unsigned)imm > et.size,
10938 _("immediate out of range for narrowing operation"));
10939 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
10943 do_neon_shll (void)
10945 /* FIXME: Type checking when lengthening. */
10946 struct neon_type_el et = neon_check_type (2, NS_QDI,
10947 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
10948 unsigned imm = inst.operands[2].imm;
10950 if (imm == et.size)
10952 /* Maximum shift variant. */
10953 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10954 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10955 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10956 inst.instruction |= LOW4 (inst.operands[1].reg);
10957 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10958 inst.instruction |= neon_logbits (et.size) << 18;
10960 inst.instruction = neon_dp_fixup (inst.instruction);
10964 /* A more-specific type check for non-max versions. */
10965 et = neon_check_type (2, NS_QDI,
10966 N_EQK | N_DBL, N_SU_32 | N_KEY);
10967 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10968 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
10972 /* Check the various types for the VCVT instruction, and return the one that
10973 the current instruction is. */
10976 neon_cvt_flavour (enum neon_shape rs)
10978 #define CVT_VAR(C,X,Y) \
10979 et = neon_check_type (2, rs, (X), (Y)); \
10980 if (et.type != NT_invtype) \
10982 inst.error = NULL; \
10985 struct neon_type_el et;
10987 CVT_VAR (0, N_S32, N_F32);
10988 CVT_VAR (1, N_U32, N_F32);
10989 CVT_VAR (2, N_F32, N_S32);
10990 CVT_VAR (3, N_F32, N_U32);
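  /* Whichever pairing matches first gives the flavour index (0-3) that the
     VCVT encoders below use to pick an entry from their enctab arrays.  */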
10999 /* Fixed-point conversion with #0 immediate is encoded as an integer
11001 if (inst.operands[2].present && inst.operands[2].imm != 0)
11003 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11004 int flavour = neon_cvt_flavour (rs);
11005 unsigned immbits = 32 - inst.operands[2].imm;
11006 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
11007 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11009 inst.instruction |= enctab[flavour];
11010 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11011 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11012 inst.instruction |= LOW4 (inst.operands[1].reg);
11013 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11014 inst.instruction |= (rs == NS_QQI) << 6;
11015 inst.instruction |= 1 << 21;
11016 inst.instruction |= immbits << 16;
11020 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11021 int flavour = neon_cvt_flavour (rs);
11022 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
11023 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11025 inst.instruction |= enctab[flavour];
11026 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11027 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11028 inst.instruction |= LOW4 (inst.operands[1].reg);
11029 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11030 inst.instruction |= (rs == NS_QQ) << 6;
11031 inst.instruction |= 2 << 18;
11033 inst.instruction = neon_dp_fixup (inst.instruction);
11037 neon_move_immediate (void)
11039 enum neon_shape rs = neon_check_shape (NS_DI_QI);
11040 struct neon_type_el et = neon_check_type (1, rs,
11041 N_I8 | N_I16 | N_I32 | N_I64 | N_F32);
11042 unsigned immlo, immhi = 0, immbits;
11045 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11046 op = (inst.instruction & (1 << 5)) != 0;
11048 immlo = inst.operands[1].imm;
11049 if (inst.operands[1].regisimm)
11050 immhi = inst.operands[1].reg;
11052 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
11053 _("immediate has bits set outside the operand size"));
11055 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11056 et.size, et.type)) == FAIL)
11058 /* Invert relevant bits only. */
11059 neon_invert_size (&immlo, &immhi, et.size);
11060 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11061 with one or the other; those cases are caught by
11062 neon_cmode_for_move_imm. */
11064 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11065 et.size, et.type)) == FAIL)
11067 first_error (_("immediate out of range"));
11072 inst.instruction &= ~(1 << 5);
11073 inst.instruction |= op << 5;
11075 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11076 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11077 inst.instruction |= (rs == NS_QI) << 6;
11078 inst.instruction |= cmode << 8;
11080 neon_write_immbits (immbits);
11086 if (inst.operands[1].isreg)
11088 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11090 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11091 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11092 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11093 inst.instruction |= LOW4 (inst.operands[1].reg);
11094 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11095 inst.instruction |= (rs == NS_QQ) << 6;
11099 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11100 neon_move_immediate ();
11103 inst.instruction = neon_dp_fixup (inst.instruction);
11106 /* Encode instructions of form:
11108 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11109 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11114 neon_mixed_length (struct neon_type_el et, unsigned size)
11116 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11117 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11118 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11119 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11120 inst.instruction |= LOW4 (inst.operands[2].reg);
11121 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11122 inst.instruction |= (et.type == NT_unsigned) << 24;
11123 inst.instruction |= neon_logbits (size) << 20;
11125 inst.instruction = neon_dp_fixup (inst.instruction);
11129 do_neon_dyadic_long (void)
11131 /* FIXME: Type checking for lengthening op. */
11132 struct neon_type_el et = neon_check_type (3, NS_QDD,
11133 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
11134 neon_mixed_length (et, et.size);
11138 do_neon_abal (void)
11140 struct neon_type_el et = neon_check_type (3, NS_QDD,
11141 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
11142 neon_mixed_length (et, et.size);
11146 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
11148 if (inst.operands[2].isscalar)
11150 struct neon_type_el et = neon_check_type (3, NS_QDS,
11151 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
11152 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11153 neon_mul_mac (et, et.type == NT_unsigned);
11157 struct neon_type_el et = neon_check_type (3, NS_QDD,
11158 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
11159 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11160 neon_mixed_length (et, et.size);
11165 do_neon_mac_maybe_scalar_long (void)
11167 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
11171 do_neon_dyadic_wide (void)
11173 struct neon_type_el et = neon_check_type (3, NS_QQD,
11174 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
11175 neon_mixed_length (et, et.size);
11179 do_neon_dyadic_narrow (void)
11181 struct neon_type_el et = neon_check_type (3, NS_QDD,
11182 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
11183 neon_mixed_length (et, et.size / 2);
11187 do_neon_mul_sat_scalar_long (void)
11189 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
11193 do_neon_vmull (void)
11195 if (inst.operands[2].isscalar)
11196 do_neon_mac_maybe_scalar_long ();
11199 struct neon_type_el et = neon_check_type (3, NS_QDD,
11200 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
11201 if (et.type == NT_poly)
11202 inst.instruction = NEON_ENC_POLY (inst.instruction);
11204 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11205 /* For polynomial encoding, size field must be 0b00 and the U bit must be
11206 zero. Should be OK as-is. */
11207 neon_mixed_length (et, et.size);
11214 enum neon_shape rs = neon_check_shape (NS_DDDI_QQQI);
11215 struct neon_type_el et = neon_check_type (3, rs,
11216 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
11217 unsigned imm = (inst.operands[3].imm * et.size) / 8;
11218 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11219 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11220 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11221 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11222 inst.instruction |= LOW4 (inst.operands[2].reg);
11223 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11224 inst.instruction |= (rs == NS_QQQI) << 6;
11225 inst.instruction |= imm << 8;
11227 inst.instruction = neon_dp_fixup (inst.instruction);
11233 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11234 struct neon_type_el et = neon_check_type (2, rs,
11235 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11236 unsigned op = (inst.instruction >> 7) & 3;
11237 /* N (width of reversed regions) is encoded as part of the bitmask. We
11238 extract it here to check the elements to be reversed are smaller.
11239 Otherwise we'd get a reserved instruction. */
11240 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
11241 assert (elsize != 0);
11242 constraint (et.size >= elsize,
11243 _("elements must be smaller than reversal region"));
11244 neon_two_same (rs == NS_QQ, 1, et.size);
11250 if (inst.operands[1].isscalar)
11252 enum neon_shape rs = neon_check_shape (NS_DS_QS);
11253 struct neon_type_el et = neon_check_type (2, rs,
11254 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11255 unsigned sizebits = et.size >> 3;
11256 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
11257 int logsize = neon_logbits (et.size);
11258 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
11259 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11260 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11261 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11262 inst.instruction |= LOW4 (dm);
11263 inst.instruction |= HI1 (dm) << 5;
11264 inst.instruction |= (rs == NS_QS) << 6;
11265 inst.instruction |= x << 17;
11266 inst.instruction |= sizebits << 16;
11268 inst.instruction = neon_dp_fixup (inst.instruction);
11272 enum neon_shape rs = neon_check_shape (NS_DR_QR);
11273 struct neon_type_el et = neon_check_type (1, rs,
11274 N_8 | N_16 | N_32 | N_KEY);
11275 unsigned save_cond = inst.instruction & 0xf0000000;
11276 /* Duplicate ARM register to lanes of vector. */
11277 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
11280 case 8: inst.instruction |= 0x400000; break;
11281 case 16: inst.instruction |= 0x000020; break;
11282 case 32: inst.instruction |= 0x000000; break;
11285 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11286 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
11287 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
11288 inst.instruction |= (rs == NS_QR) << 21;
11289 /* The encoding for this instruction is identical for the ARM and Thumb
11290 variants, except for the condition field. */
11292 inst.instruction |= 0xe0000000;
11294 inst.instruction |= save_cond;
11298 /* VMOV has particularly many variations. It can be one of:
11299 0. VMOV<c><q> <Qd>, <Qm>
11300 1. VMOV<c><q> <Dd>, <Dm>
11301 (Register operations, which are VORR with Rm = Rn.)
11302 2. VMOV<c><q>.<dt> <Qd>, #<imm>
11303 3. VMOV<c><q>.<dt> <Dd>, #<imm>
11305 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
11306 (ARM register to scalar.)
11307 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
11308 (Two ARM registers to vector.)
11309 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
11310 (Scalar to ARM register.)
11311 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
11312 (Vector to two ARM registers.)
11314 We should have just enough information to be able to disambiguate most of
11315 these, apart from "Two ARM registers to vector" and "Vector to two ARM
11316 registers" cases. For these, abuse the .regisimm operand field to signify a
11319 All the encoded bits are hardcoded by this function.
11321 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
11322 Cases 5, 7 may be used with VFPv2 and above.
11324 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
11325 can specify a type where it doesn't make sense to, and is ignored).  */
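/* For example, "vmov.32 d0[1], r2" is case 4 (ARM register to scalar), while
   "vmov d0, r2, r3" is case 5 and therefore needs VFPv2 or above.  */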
11331 int nargs = inst.operands[0].present + inst.operands[1].present
11332 + inst.operands[2].present;
11333 unsigned save_cond = thumb_mode ? 0xe0000000 : inst.instruction & 0xf0000000;
11334 const char *vfp_vers = "selected FPU does not support instruction";
11339 /* Cases 0, 1, 2, 3, 4, 6. */
11340 if (inst.operands[1].isscalar)
11343 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11344 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
11345 unsigned logsize = neon_logbits (et.size);
11346 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
11347 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
11348 unsigned abcdebits = 0;
11350 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11352 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11353 && et.size != 32, _(vfp_vers));
11354 constraint (et.type == NT_invtype, _("bad type for scalar"));
11355 constraint (x >= 64 / et.size, _("scalar index out of range"));
11359 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
11360 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
11361 case 32: abcdebits = 0x00; break;
11365 abcdebits |= x << logsize;
11366 inst.instruction = save_cond;
11367 inst.instruction |= 0xe100b10;
11368 inst.instruction |= LOW4 (dn) << 16;
11369 inst.instruction |= HI1 (dn) << 7;
11370 inst.instruction |= inst.operands[0].reg << 12;
11371 inst.instruction |= (abcdebits & 3) << 5;
11372 inst.instruction |= (abcdebits >> 2) << 21;
11374 else if (inst.operands[1].isreg)
11376 /* Cases 0, 1, 4. */
11377 if (inst.operands[0].isscalar)
11380 unsigned bcdebits = 0;
11381 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11382 N_8 | N_16 | N_32 | N_KEY, N_EQK);
11383 int logsize = neon_logbits (et.size);
11384 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
11385 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
11387 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11389 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11390 && et.size != 32, _(vfp_vers));
11391 constraint (et.type == NT_invtype, _("bad type for scalar"));
11392 constraint (x >= 64 / et.size, _("scalar index out of range"));
11396 case 8: bcdebits = 0x8; break;
11397 case 16: bcdebits = 0x1; break;
11398 case 32: bcdebits = 0x0; break;
11402 bcdebits |= x << logsize;
11403 inst.instruction = save_cond;
11404 inst.instruction |= 0xe000b10;
11405 inst.instruction |= LOW4 (dn) << 16;
11406 inst.instruction |= HI1 (dn) << 7;
11407 inst.instruction |= inst.operands[1].reg << 12;
11408 inst.instruction |= (bcdebits & 3) << 5;
11409 inst.instruction |= (bcdebits >> 2) << 21;
11414 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11415 /* The architecture manual I have doesn't explicitly state which
11416 value the U bit should have for register->register moves, but
11417 the equivalent VORR instruction has U = 0, so do that. */
11418 inst.instruction = 0x0200110;
11419 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11420 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11421 inst.instruction |= LOW4 (inst.operands[1].reg);
11422 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11423 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11424 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11425 inst.instruction |= (rs == NS_QQ) << 6;
11427 inst.instruction = neon_dp_fixup (inst.instruction);
11433 inst.instruction = 0x0800010;
11434 neon_move_immediate ();
11435 inst.instruction = neon_dp_fixup (inst.instruction);
11441 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
11444 if (inst.operands[0].regisimm)
11447 inst.instruction = save_cond;
11448 inst.instruction |= 0xc400b10;
11449 inst.instruction |= LOW4 (inst.operands[0].reg);
11450 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
11451 inst.instruction |= inst.operands[1].reg << 12;
11452 inst.instruction |= inst.operands[2].reg << 16;
11457 inst.instruction = save_cond;
11458 inst.instruction |= 0xc500b10;
11459 inst.instruction |= inst.operands[0].reg << 12;
11460 inst.instruction |= inst.operands[1].reg << 16;
11461 inst.instruction |= LOW4 (inst.operands[2].reg);
11462 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11472 do_neon_rshift_round_imm (void)
11474 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11475 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11476 int imm = inst.operands[2].imm;
11478 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
11481 inst.operands[2].present = 0;
11486 constraint (imm < 1 || (unsigned)imm > et.size,
11487 _("immediate out of range for shift"));
11488 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
11493 do_neon_movl (void)
11495 struct neon_type_el et = neon_check_type (2, NS_QD,
11496 N_EQK | N_DBL, N_SU_32 | N_KEY);
11497 unsigned sizebits = et.size >> 3;
11498 inst.instruction |= sizebits << 19;
11499 neon_two_same (0, et.type == NT_unsigned, -1);
11505 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11506 struct neon_type_el et = neon_check_type (2, rs,
11507 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11508 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11509 neon_two_same (rs == NS_QQ, 1, et.size);
11513 do_neon_zip_uzp (void)
11515 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11516 struct neon_type_el et = neon_check_type (2, rs,
11517 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11518 if (rs == NS_DD && et.size == 32)
11520 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
11521 inst.instruction = N_MNEM_vtrn;
11525 neon_two_same (rs == NS_QQ, 1, et.size);
11529 do_neon_sat_abs_neg (void)
11531 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11532 struct neon_type_el et = neon_check_type (2, rs,
11533 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11534 neon_two_same (rs == NS_QQ, 1, et.size);
11538 do_neon_pair_long (void)
11540 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11541 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
11542 /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
11543 inst.instruction |= (et.type == NT_unsigned) << 7;
11544 neon_two_same (rs == NS_QQ, 1, et.size);
11548 do_neon_recip_est (void)
11550 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11551 struct neon_type_el et = neon_check_type (2, rs,
11552 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
11553 inst.instruction |= (et.type == NT_float) << 8;
11554 neon_two_same (rs == NS_QQ, 1, et.size);
11560 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11561 struct neon_type_el et = neon_check_type (2, rs,
11562 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11563 neon_two_same (rs == NS_QQ, 1, et.size);
11569 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11570 struct neon_type_el et = neon_check_type (2, rs,
11571 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
11572 neon_two_same (rs == NS_QQ, 1, et.size);
11578 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11579 struct neon_type_el et = neon_check_type (2, rs,
11580 N_EQK | N_INT, N_8 | N_KEY);
11581 neon_two_same (rs == NS_QQ, 1, et.size);
11587 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11588 neon_two_same (rs == NS_QQ, 1, -1);
11592 do_neon_tbl_tbx (void)
11594 unsigned listlenbits;
11595 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
11597 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
11599 first_error (_("bad list length for table lookup"));
11603 listlenbits = inst.operands[1].imm - 1;
11604 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11605 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11606 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11607 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11608 inst.instruction |= LOW4 (inst.operands[2].reg);
11609 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11610 inst.instruction |= listlenbits << 8;
11612 inst.instruction = neon_dp_fixup (inst.instruction);
11616 do_neon_ldm_stm (void)
11618 /* P, U and L bits are part of bitmask. */
11619 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
11620 unsigned offsetbits = inst.operands[1].imm * 2;
11622 constraint (is_dbmode && !inst.operands[0].writeback,
11623 _("writeback (!) must be used for VLDMDB and VSTMDB"));
11625 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
11626 _("register list must contain at least 1 and at most 16 "
11629 inst.instruction |= inst.operands[0].reg << 16;
11630 inst.instruction |= inst.operands[0].writeback << 21;
11631 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11632 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
11634 inst.instruction |= offsetbits;
11637 inst.instruction |= 0xe0000000;
11641 do_neon_ldr_str (void)
11643 unsigned offsetbits;
11645 int is_ldr = (inst.instruction & (1 << 20)) != 0;
11647 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11648 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11650 constraint (inst.reloc.pc_rel && !is_ldr,
11651 _("PC-relative addressing unavailable with VSTR"));
11653 constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
11654 _("Immediate value must be a constant"));
11656 if (inst.reloc.exp.X_add_number < 0)
11659 offsetbits = -inst.reloc.exp.X_add_number / 4;
11662 offsetbits = inst.reloc.exp.X_add_number / 4;
11664 /* FIXME: Does this catch everything? */
11665 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11666 || inst.operands[1].postind || inst.operands[1].writeback
11667 || inst.operands[1].immisreg || inst.operands[1].shifted,
11669 constraint ((inst.operands[1].imm & 3) != 0,
11670 _("Offset must be a multiple of 4"));
11671 constraint (offsetbits != (offsetbits & 0xff),
11672 _("Immediate offset out of range"));
11674 inst.instruction |= inst.operands[1].reg << 16;
11675 inst.instruction |= offsetbits & 0xff;
11676 inst.instruction |= offset_up << 23;
11679 inst.instruction |= 0xe0000000;
11681 if (inst.reloc.pc_rel)
11684 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
11686 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
11689 inst.reloc.type = BFD_RELOC_UNUSED;
11692 /* "interleave" version also handles non-interleaving register VLD1/VST1
11696 do_neon_ld_st_interleave (void)
11698 struct neon_type_el et = neon_check_type (1, NS_IGNORE,
11699 N_8 | N_16 | N_32 | N_64);
11700 unsigned alignbits = 0;
11702 /* The bits in this table go:
11703 0: register stride of one (0) or two (1)
11704 1,2: register list length, minus one (1, 2, 3, 4).
11705 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
11706 We use -1 for invalid entries. */
11707 const int typetable[] =
11709 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
11710 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
11711 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
11712 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
11716 if (et.type == NT_invtype)
11719 if (inst.operands[1].immisalign)
11720 switch (inst.operands[1].imm >> 8)
11722 case 64: alignbits = 1; break;
11724 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11725 goto bad_alignment;
11729 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11730 goto bad_alignment;
11735 first_error (_("bad alignment"));
11739 inst.instruction |= alignbits << 4;
11740 inst.instruction |= neon_logbits (et.size) << 6;
11742 /* Bits [4:6] of the immediate in a list specifier encode register stride
11743 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
11744 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
11745 up the right value for "type" in a table based on this value and the given
11746 list style, then stick it back. */
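  /* For example, a VLD2 (bits [9:8] == 1) of a two-register list with stride
     one has bits [6:4] == 0b010, giving idx == 0b01010 and typebits == 0x8
     from the table above.  */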
11747 idx = ((inst.operands[0].imm >> 4) & 7)
11748 | (((inst.instruction >> 8) & 3) << 3);
11750 typebits = typetable[idx];
11752 constraint (typebits == -1, _("bad list type for instruction"));
11754 inst.instruction &= ~0xf00;
11755 inst.instruction |= typebits << 8;
11758 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
11759 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
11760 otherwise. The variable arguments are a list of pairs of legal (size, align)
11761 values, terminated with -1. */
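/* For example, the VLD1 to-all-lanes case below passes the pairs
   (16, 16), (32, 32) terminated by -1, so a :16 alignment is accepted only
   for 16-bit elements and :32 only for 32-bit elements.  */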
11764 neon_alignment_bit (int size, int align, int *do_align, ...)
11767 int result = FAIL, thissize, thisalign;
11769 if (!inst.operands[1].immisalign)
11775 va_start (ap, do_align);
11779 thissize = va_arg (ap, int);
11780 if (thissize == -1)
11782 thisalign = va_arg (ap, int);
11784 if (size == thissize && align == thisalign)
11787 while (result != SUCCESS);
11791 if (result == SUCCESS)
11794 first_error (_("unsupported alignment for instruction"));
11800 do_neon_ld_st_lane (void)
11802 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11803 int align_good, do_align = 0;
11804 int logsize = neon_logbits (et.size);
11805 int align = inst.operands[1].imm >> 8;
11806 int n = (inst.instruction >> 8) & 3;
11807 int max_el = 64 / et.size;
11809 if (et.type == NT_invtype)
11812 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
11813 _("bad list length"));
11814 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
11815 _("scalar index out of range"));
11816 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
11818 _("stride of 2 unavailable when element size is 8"));
11822 case 0: /* VLD1 / VST1. */
11823 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
11825 if (align_good == FAIL)
11829 unsigned alignbits = 0;
11832 case 16: alignbits = 0x1; break;
11833 case 32: alignbits = 0x3; break;
11836 inst.instruction |= alignbits << 4;
11840 case 1: /* VLD2 / VST2. */
11841 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
11843 if (align_good == FAIL)
11846 inst.instruction |= 1 << 4;
11849 case 2: /* VLD3 / VST3. */
11850 constraint (inst.operands[1].immisalign,
11851 _("can't use alignment with this instruction"));
11854 case 3: /* VLD4 / VST4. */
11855 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11856 16, 64, 32, 64, 32, 128, -1);
11857 if (align_good == FAIL)
11861 unsigned alignbits = 0;
11864 case 8: alignbits = 0x1; break;
11865 case 16: alignbits = 0x1; break;
11866 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
11869 inst.instruction |= alignbits << 4;
11876 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
11877 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11878 inst.instruction |= 1 << (4 + logsize);
11880 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
11881 inst.instruction |= logsize << 10;
11884 /* Encode single n-element structure to all lanes VLD<n> instructions. */
11887 do_neon_ld_dup (void)
11889 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11890 int align_good, do_align = 0;
11892 if (et.type == NT_invtype)
11895 switch ((inst.instruction >> 8) & 3)
11897 case 0: /* VLD1. */
11898 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
11899 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11900 &do_align, 16, 16, 32, 32, -1);
11901 if (align_good == FAIL)
11903 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
11906 case 2: inst.instruction |= 1 << 5; break;
11907 default: first_error (_("bad list length")); return;
11909 inst.instruction |= neon_logbits (et.size) << 6;
11912 case 1: /* VLD2. */
11913 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11914 &do_align, 8, 16, 16, 32, 32, 64, -1);
11915 if (align_good == FAIL)
11917 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
11918 _("bad list length"));
11919 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11920 inst.instruction |= 1 << 5;
11921 inst.instruction |= neon_logbits (et.size) << 6;
11924 case 2: /* VLD3. */
11925 constraint (inst.operands[1].immisalign,
11926 _("can't use alignment with this instruction"));
11927 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
11928 _("bad list length"));
11929 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11930 inst.instruction |= 1 << 5;
11931 inst.instruction |= neon_logbits (et.size) << 6;
11934 case 3: /* VLD4. */
11936 int align = inst.operands[1].imm >> 8;
11937 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11938 16, 64, 32, 64, 32, 128, -1);
11939 if (align_good == FAIL)
11941 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
11942 _("bad list length"));
11943 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11944 inst.instruction |= 1 << 5;
11945 if (et.size == 32 && align == 128)
11946 inst.instruction |= 0x3 << 6;
11948 inst.instruction |= neon_logbits (et.size) << 6;
11955 inst.instruction |= do_align << 4;
11958 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
11959 apart from bits [11:4]).  */
11962 do_neon_ldx_stx (void)
11964 switch (NEON_LANE (inst.operands[0].imm))
11966 case NEON_INTERLEAVE_LANES:
11967 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
11968 do_neon_ld_st_interleave ();
11971 case NEON_ALL_LANES:
11972 inst.instruction = NEON_ENC_DUP (inst.instruction);
11977 inst.instruction = NEON_ENC_LANE (inst.instruction);
11978 do_neon_ld_st_lane ();
11981 /* L bit comes from bit mask. */
11982 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11983 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11984 inst.instruction |= inst.operands[1].reg << 16;
11986 if (inst.operands[1].postind)
11988 int postreg = inst.operands[1].imm & 0xf;
11989 constraint (!inst.operands[1].immisreg,
11990 _("post-index must be a register"));
11991 constraint (postreg == 0xd || postreg == 0xf,
11992 _("bad register for post-index"));
11993 inst.instruction |= postreg;
11995 else if (inst.operands[1].writeback)
11997 inst.instruction |= 0xd;
12000 inst.instruction |= 0xf;
12003 inst.instruction |= 0xf9000000;
12005 inst.instruction |= 0xf4000000;
12009 /* Overall per-instruction processing. */
12011 /* We need to be able to fix up arbitrary expressions in some statements.
12012 This is so that we can handle symbols that are an arbitrary distance from
12013 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
12014 which returns part of an address in a form which will be valid for
12015 a data instruction. We do this by pushing the expression into a symbol
12016 in the expr_section, and creating a fix for that. */
12019 fix_new_arm (fragS * frag,
12034 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
12038 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
12043 /* Mark whether the fix is to a THUMB instruction, or an ARM
12045 new_fix->tc_fix_data = thumb_mode;
12048 /* Create a frag for an instruction requiring relaxation.  */
12050 output_relax_insn (void)
12057 /* The size of the instruction is unknown, so tie the debug info to the
12058 start of the instruction. */
12059 dwarf2_emit_insn (0);
12062 switch (inst.reloc.exp.X_op)
12065 sym = inst.reloc.exp.X_add_symbol;
12066 offset = inst.reloc.exp.X_add_number;
12070 offset = inst.reloc.exp.X_add_number;
12073 sym = make_expr_symbol (&inst.reloc.exp);
12077 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
12078 inst.relax, sym, offset, NULL/*offset, opcode*/);
12079 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
12082 /* Write a 32-bit thumb instruction to buf. */
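/* The halfword holding the most significant bits is emitted first; byte order
   within each halfword is handled by md_number_to_chars.  */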
12084 put_thumb32_insn (char * buf, unsigned long insn)
12086 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
12087 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
12091 output_inst (const char * str)
12097 as_bad ("%s -- `%s'", inst.error, str);
12101 output_relax_insn();
12104 if (inst.size == 0)
12107 to = frag_more (inst.size);
12109 if (thumb_mode && (inst.size > THUMB_SIZE))
12111 assert (inst.size == (2 * THUMB_SIZE));
12112 put_thumb32_insn (to, inst.instruction);
12114 else if (inst.size > INSN_SIZE)
12116 assert (inst.size == (2 * INSN_SIZE));
12117 md_number_to_chars (to, inst.instruction, INSN_SIZE);
12118 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
12121 md_number_to_chars (to, inst.instruction, inst.size);
12123 if (inst.reloc.type != BFD_RELOC_UNUSED)
12124 fix_new_arm (frag_now, to - frag_now->fr_literal,
12125 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
12129 dwarf2_emit_insn (inst.size);
12133 /* Tag values used in struct asm_opcode's tag field. */
12136 OT_unconditional, /* Instruction cannot be conditionalized.
12137 The ARM condition field is still 0xE. */
12138 OT_unconditionalF, /* Instruction cannot be conditionalized
12139 and carries 0xF in its ARM condition field. */
12140 OT_csuffix, /* Instruction takes a conditional suffix. */
12141 OT_cinfix3, /* Instruction takes a conditional infix,
12142 beginning at character index 3. (In
12143 unified mode, it becomes a suffix.) */
12144 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
12145 tsts, cmps, cmns, and teqs. */
12146 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
12147 character index 3, even in unified mode. Used for
12148 legacy instructions where suffix and infix forms
12149 may be ambiguous. */
12150 OT_csuf_or_in3, /* Instruction takes either a conditional
12151 suffix or an infix at character index 3. */
12152 OT_odd_infix_unc, /* This is the unconditional variant of an
12153 instruction that takes a conditional infix
12154 at an unusual position. In unified mode,
12155 this variant will accept a suffix. */
12156 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
12157 are the conditional variants of instructions that
12158 take conditional infixes in unusual positions.
12159 The infix appears at character index
12160 (tag - OT_odd_infix_0). These are not accepted
12161 in unified mode. */
12164 /* Subroutine of md_assemble, responsible for looking up the primary
12165 opcode from the mnemonic the user wrote. STR points to the
12166 beginning of the mnemonic.
12168 This is not simply a hash table lookup, because of conditional
12169 variants. Most instructions have conditional variants, which are
12170 expressed with a _conditional affix_ to the mnemonic. If we were
12171 to encode each conditional variant as a literal string in the opcode
12172 table, it would have approximately 20,000 entries.
12174 Most mnemonics take this affix as a suffix, and in unified syntax,
12175 'most' is upgraded to 'all'. However, in the divided syntax, some
12176 instructions take the affix as an infix, notably the s-variants of
12177 the arithmetic instructions. Of those instructions, all but six
12178 have the infix appear after the third character of the mnemonic.
12180 Accordingly, the algorithm for looking up primary opcodes given
12183 1. Look up the identifier in the opcode table.
12184 If we find a match, go to step U.
12186 2. Look up the last two characters of the identifier in the
12187 conditions table. If we find a match, look up the first N-2
12188 characters of the identifier in the opcode table. If we
12189 find a match, go to step CE.
12191 3. Look up the fourth and fifth characters of the identifier in
12192 the conditions table. If we find a match, extract those
12193 characters from the identifier, and look up the remaining
12194 characters in the opcode table. If we find a match, go
12199 U. Examine the tag field of the opcode structure, in case this is
12200 one of the six instructions with its conditional infix in an
12201 unusual place. If it is, the tag tells us where to find the
12202 infix; look it up in the conditions table and set inst.cond
12203 accordingly. Otherwise, this is an unconditional instruction.
12204 Again set inst.cond accordingly. Return the opcode structure.
12206 CE. Examine the tag field to make sure this is an instruction that
12207 should receive a conditional suffix. If it is not, fail.
12208 Otherwise, set inst.cond from the suffix we already looked up,
12209 and return the opcode structure.
12211 CM. Examine the tag field to make sure this is an instruction that
12212 should receive a conditional infix after the third character.
12213 If it is not, fail. Otherwise, undo the edits to the current
12214 line of input and proceed as for case CE. */
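/* Illustrative walkthrough only (the code below is authoritative): for the
   identifier "addeq", step 1 fails, step 2 finds "eq" in the conditions
   table and "add" in the opcode table, and case CE attaches the suffix.
   For the divided-syntax identifier "subges", steps 1 and 2 fail, but step
   3 finds "ge" at the fourth and fifth characters; removing it leaves
   "subs", which is in the opcode table, so case CM applies the infix.  */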
12216 static const struct asm_opcode *
12217 opcode_lookup (char **str)
12221 const struct asm_opcode *opcode;
12222 const struct asm_cond *cond;
12225 /* Scan up to the end of the mnemonic, which must end in white space,
12226 '.' (in unified mode only), or end of string. */
12227 for (base = end = *str; *end != '\0'; end++)
12228 if (*end == ' ' || (unified_syntax && *end == '.'))
12234 /* Handle a possible width suffix and/or Neon type suffix. */
12241 else if (end[1] == 'n')
12246 inst.vectype.elems = 0;
12248 *str = end + offset;
12250 if (end[offset] == '.')
12252 /* See if we have a Neon type suffix. */
12253 if (parse_neon_type (&inst.vectype, str) == FAIL)
12256 else if (end[offset] != '\0' && end[offset] != ' ')
12262 /* Look for unaffixed or special-case affixed mnemonic. */
12263 opcode = hash_find_n (arm_ops_hsh, base, end - base);
12267 if (opcode->tag < OT_odd_infix_0)
12269 inst.cond = COND_ALWAYS;
12273 if (unified_syntax)
12274 as_warn (_("conditional infixes are deprecated in unified syntax"));
12275 affix = base + (opcode->tag - OT_odd_infix_0);
12276 cond = hash_find_n (arm_cond_hsh, affix, 2);
12279 inst.cond = cond->value;
12283 /* Cannot have a conditional suffix on a mnemonic of less than two characters. */
12285 if (end - base < 3)
12288 /* Look for suffixed mnemonic. */
12290 cond = hash_find_n (arm_cond_hsh, affix, 2);
12291 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
12292 if (opcode && cond)
12295 switch (opcode->tag)
12297 case OT_cinfix3_legacy:
12298 /* Ignore conditional suffixes matched on infix only mnemonics. */
12302 case OT_cinfix3_deprecated:
12303 case OT_odd_infix_unc:
12304 if (!unified_syntax)
12306 /* else fall through */
12309 case OT_csuf_or_in3:
12310 inst.cond = cond->value;
12313 case OT_unconditional:
12314 case OT_unconditionalF:
12317 inst.cond = cond->value;
12321 /* delayed diagnostic */
12322 inst.error = BAD_COND;
12323 inst.cond = COND_ALWAYS;
12332 /* Cannot have a usual-position infix on a mnemonic of less than
12333 six characters (five would be a suffix). */
12334 if (end - base < 6)
12337 /* Look for infixed mnemonic in the usual position. */
12339 cond = hash_find_n (arm_cond_hsh, affix, 2);
12343 memcpy (save, affix, 2);
12344 memmove (affix, affix + 2, (end - affix) - 2);
12345 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
12346 memmove (affix + 2, affix, (end - affix) - 2);
12347 memcpy (affix, save, 2);
12350 && (opcode->tag == OT_cinfix3
12351 || opcode->tag == OT_cinfix3_deprecated
12352 || opcode->tag == OT_csuf_or_in3
12353 || opcode->tag == OT_cinfix3_legacy))
12357 && (opcode->tag == OT_cinfix3
12358 || opcode->tag == OT_cinfix3_deprecated))
12359 as_warn (_("conditional infixes are deprecated in unified syntax"));
12361 inst.cond = cond->value;
12369 md_assemble (char *str)
12372 const struct asm_opcode * opcode;
12374 /* Align the previous label if needed. */
12375 if (last_label_seen != NULL)
12377 symbol_set_frag (last_label_seen, frag_now);
12378 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
12379 S_SET_SEGMENT (last_label_seen, now_seg);
12382 memset (&inst, '\0', sizeof (inst));
12383 inst.reloc.type = BFD_RELOC_UNUSED;
12385 opcode = opcode_lookup (&p);
12388 /* It wasn't an instruction, but it might be a register alias of
12389 the form alias .req reg, or a Neon .dn/.qn directive. */
12390 if (!create_register_alias (str, p)
12391 && !create_neon_reg_alias (str, p))
12392 as_bad (_("bad instruction `%s'"), str);
12397 if (opcode->tag == OT_cinfix3_deprecated)
12398 as_warn (_("s suffix on comparison instruction is deprecated"));
12402 arm_feature_set variant;
12404 variant = cpu_variant;
12405 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
12406 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
12407 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
12408 /* Check that this instruction is supported for this CPU. */
12409 if (!opcode->tvariant
12410 || (thumb_mode == 1
12411 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
12413 as_bad (_("selected processor does not support `%s'"), str);
12416 if (inst.cond != COND_ALWAYS && !unified_syntax
12417 && opcode->tencode != do_t_branch)
12419 as_bad (_("Thumb does not support conditional execution"));
12423 /* Check conditional suffixes. */
12424 if (current_it_mask)
12427 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
12428 current_it_mask <<= 1;
12429 current_it_mask &= 0x1f;
12430 /* The BKPT instruction is unconditional even in an IT block. */
12432 && cond != inst.cond && opcode->tencode != do_t_bkpt)
12434 as_bad (_("incorrect condition in IT block"));
12438 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
12440 as_bad (_("Thumb conditional instruction not in IT block"));
12444 mapping_state (MAP_THUMB);
12445 inst.instruction = opcode->tvalue;
12447 if (!parse_operands (p, opcode->operands))
12448 opcode->tencode ();
12450 /* Clear current_it_mask at the end of an IT block. */
12451 if (current_it_mask == 0x10)
12452 current_it_mask = 0;
12454 if (!(inst.error || inst.relax))
12456 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
12457 inst.size = (inst.instruction > 0xffff ? 4 : 2);
12458 if (inst.size_req && inst.size_req != inst.size)
12460 as_bad (_("cannot honor width suffix -- `%s'"), str);
12464 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12465 *opcode->tvariant);
12466 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
12467 set those bits when Thumb-2 32-bit instructions are seen, i.e.
12468 anything other than bl/blx.
12469 This is overly pessimistic for relaxable instructions. */
12470 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
12472 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12477 /* Check that this instruction is supported for this CPU. */
12478 if (!opcode->avariant ||
12479 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
12481 as_bad (_("selected processor does not support `%s'"), str);
12486 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
12490 mapping_state (MAP_ARM);
12491 inst.instruction = opcode->avalue;
12492 if (opcode->tag == OT_unconditionalF)
12493 inst.instruction |= 0xF << 28;
12495 inst.instruction |= inst.cond << 28;
12496 inst.size = INSN_SIZE;
12497 if (!parse_operands (p, opcode->operands))
12498 opcode->aencode ();
12499 /* Arm mode bx is marked as both v4T and v5 because it's still required
12500 on a hypothetical non-thumb v5 core. */
12501 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
12502 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
12503 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
12505 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
12506 *opcode->avariant);
12511 /* Various frobbings of labels and their addresses. */
12514 arm_start_line_hook (void)
12516 last_label_seen = NULL;
12520 arm_frob_label (symbolS * sym)
12522 last_label_seen = sym;
12524 ARM_SET_THUMB (sym, thumb_mode);
12526 #if defined OBJ_COFF || defined OBJ_ELF
12527 ARM_SET_INTERWORK (sym, support_interwork);
12530 /* Note - do not allow local symbols (.Lxxx) to be labeled
12531 as Thumb functions. This is because these labels, whilst
12532 they exist inside Thumb code, are not the entry points for
12533 possible ARM->Thumb calls. Also, these labels can be used
12534 as part of a computed goto or switch statement.  For example, gcc
12535 can generate code that looks like this:
12537 ldr r2, [pc, .Laaa]
lsl r3, r3, #2
ldr r2, [r3, r2]
mov pc, r2
12547 The first instruction loads the address of the jump table.
12548 The second instruction converts a table index into a byte offset.
12549 The third instruction gets the jump address out of the table.
12550 The fourth instruction performs the jump.
12552 If the address stored at .Laaa is that of a symbol which has the
12553 Thumb_Func bit set, then the linker will arrange for this address
12554 to have the bottom bit set, which in turn would mean that the
12555 address computation performed by the third instruction would end
12556 up with the bottom bit set. Since the ARM is capable of unaligned
12557 word loads, the instruction would then load the incorrect address
12558 out of the jump table, and chaos would ensue. */
12559 if (label_is_thumb_function_name
12560 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
12561 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
12563 /* When the address of a Thumb function is taken the bottom
12564 bit of that address should be set. This will allow
12565 interworking between Arm and Thumb functions to work correctly. */
12568 THUMB_SET_FUNC (sym, 1);
12570 label_is_thumb_function_name = FALSE;
12574 dwarf2_emit_label (sym);
12579 arm_data_in_code (void)
12581 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
12583 *input_line_pointer = '/';
12584 input_line_pointer += 5;
12585 *input_line_pointer = 0;
12593 arm_canonicalize_symbol_name (char * name)
12597 if (thumb_mode && (len = strlen (name)) > 5
12598 && streq (name + len - 5, "/data"))
12599 *(name + len - 5) = 0;
12604 /* Table of all register names defined by default. The user can
12605 define additional names with .req. Note that all register names
12606 should appear in both upper and lowercase variants. Some registers
12607 also have mixed-case names. */
12609 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
12610 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
12611 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12612 #define REGSET(p,t) \
12613 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12614 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12615 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12616 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12617 #define REGSETH(p,t) \
12618 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12619 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12620 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12621 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12622 #define REGSET2(p,t) \
12623 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12624 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12625 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12626 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
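/* Expansion shown for illustration only: REGNUM (r, 0, RN) becomes
   REGDEF (r0, 0, RN), which expands to the initializer
   { "r0", 0, REG_TYPE_RN, TRUE, 0 }.  REGSET and REGSETH stamp out sixteen
   such entries each, while REGNUM2 records twice the register number, so
   the Neon Q registers defined below are stored as 0, 2, 4, and so on.  */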
12628 static const struct reg_entry reg_names[] =
12630 /* ARM integer registers. */
12631 REGSET(r, RN), REGSET(R, RN),
12633 /* ATPCS synonyms. */
12634 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
12635 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
12636 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
12638 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
12639 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
12640 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
12642 /* Well-known aliases. */
12643 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
12644 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
12646 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
12647 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
12649 /* Coprocessor numbers. */
12650 REGSET(p, CP), REGSET(P, CP),
12652 /* Coprocessor register numbers. The "cr" variants are for backward compatibility. */
12654 REGSET(c, CN), REGSET(C, CN),
12655 REGSET(cr, CN), REGSET(CR, CN),
12657 /* FPA registers. */
12658 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
12659 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
12661 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
12662 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
12664 /* VFP SP registers. */
12665 REGSET(s,VFS), REGSET(S,VFS),
12666 REGSETH(s,VFS), REGSETH(S,VFS),
12668 /* VFP DP Registers. */
12669 REGSET(d,VFD), REGSET(D,VFD),
12670 /* Extra Neon DP registers. */
12671 REGSETH(d,VFD), REGSETH(D,VFD),
12673 /* Neon QP registers. */
12674 REGSET2(q,NQ), REGSET2(Q,NQ),
12676 /* VFP control registers. */
12677 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
12678 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
12680 /* Maverick DSP coprocessor registers. */
12681 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
12682 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
12684 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
12685 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
12686 REGDEF(dspsc,0,DSPSC),
12688 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
12689 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
12690 REGDEF(DSPSC,0,DSPSC),
12692 /* iWMMXt data registers - p0, c0-15. */
12693 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
12695 /* iWMMXt control registers - p1, c0-3. */
12696 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
12697 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
12698 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
12699 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
12701 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12702 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
12703 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
12704 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
12705 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
12707 /* XScale accumulator registers. */
12708 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
12714 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
12715 within psr_required_here. */
12716 static const struct asm_psr psrs[] =
12718 /* Backward compatibility notation. Note that "all" is no longer
12719 truly all possible PSR bits. */
12720 {"all", PSR_c | PSR_f},
12724 /* Individual flags. */
12729 /* Combinations of flags. */
12730 {"fs", PSR_f | PSR_s},
12731 {"fx", PSR_f | PSR_x},
12732 {"fc", PSR_f | PSR_c},
12733 {"sf", PSR_s | PSR_f},
12734 {"sx", PSR_s | PSR_x},
12735 {"sc", PSR_s | PSR_c},
12736 {"xf", PSR_x | PSR_f},
12737 {"xs", PSR_x | PSR_s},
12738 {"xc", PSR_x | PSR_c},
12739 {"cf", PSR_c | PSR_f},
12740 {"cs", PSR_c | PSR_s},
12741 {"cx", PSR_c | PSR_x},
12742 {"fsx", PSR_f | PSR_s | PSR_x},
12743 {"fsc", PSR_f | PSR_s | PSR_c},
12744 {"fxs", PSR_f | PSR_x | PSR_s},
12745 {"fxc", PSR_f | PSR_x | PSR_c},
12746 {"fcs", PSR_f | PSR_c | PSR_s},
12747 {"fcx", PSR_f | PSR_c | PSR_x},
12748 {"sfx", PSR_s | PSR_f | PSR_x},
12749 {"sfc", PSR_s | PSR_f | PSR_c},
12750 {"sxf", PSR_s | PSR_x | PSR_f},
12751 {"sxc", PSR_s | PSR_x | PSR_c},
12752 {"scf", PSR_s | PSR_c | PSR_f},
12753 {"scx", PSR_s | PSR_c | PSR_x},
12754 {"xfs", PSR_x | PSR_f | PSR_s},
12755 {"xfc", PSR_x | PSR_f | PSR_c},
12756 {"xsf", PSR_x | PSR_s | PSR_f},
12757 {"xsc", PSR_x | PSR_s | PSR_c},
12758 {"xcf", PSR_x | PSR_c | PSR_f},
12759 {"xcs", PSR_x | PSR_c | PSR_s},
12760 {"cfs", PSR_c | PSR_f | PSR_s},
12761 {"cfx", PSR_c | PSR_f | PSR_x},
12762 {"csf", PSR_c | PSR_s | PSR_f},
12763 {"csx", PSR_c | PSR_s | PSR_x},
12764 {"cxf", PSR_c | PSR_x | PSR_f},
12765 {"cxs", PSR_c | PSR_x | PSR_s},
12766 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
12767 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
12768 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
12769 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
12770 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
12771 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
12772 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
12773 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
12774 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
12775 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
12776 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
12777 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
12778 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
12779 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
12780 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
12781 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
12782 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
12783 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
12784 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
12785 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
12786 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
12787 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
12788 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
12789 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
12792 /* Table of V7M psr names. */
12793 static const struct asm_psr v7m_psrs[] =
12806 {"basepri_max", 18},
12811 /* Table of all shift-in-operand names. */
12812 static const struct asm_shift_name shift_names [] =
12814 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
12815 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
12816 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
12817 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
12818 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
12819 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
12822 /* Table of all explicit relocation names. */
12824 static struct reloc_entry reloc_names[] =
12826 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
12827 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
12828 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
12829 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
12830 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
12831 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
12832 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
12833 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
12834 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
12835 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
12836 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
12840 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
12841 static const struct asm_cond conds[] =
12845 {"cs", 0x2}, {"hs", 0x2},
12846 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
12860 static struct asm_barrier_opt barrier_opt_names[] =
12868 /* Table of ARM-format instructions. */
12870 /* Macros for gluing together operand strings. N.B. In all cases
12871 other than OPS0, the trailing OP_stop comes from default
12872 zero-initialization of the unspecified elements of the array. */
12873 #define OPS0() { OP_stop, }
12874 #define OPS1(a) { OP_##a, }
12875 #define OPS2(a,b) { OP_##a,OP_##b, }
12876 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
12877 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
12878 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
12879 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
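/* So, for instance, OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, };
   the unspecified trailing elements are zero-initialized and, as noted
   above, a zero element is OP_stop, which terminates the operand list.  */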
12881 /* These macros abstract out the exact format of the mnemonic table and
12882 save some repeated characters. */
12884 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
12885 #define TxCE(mnem, op, top, nops, ops, ae, te) \
12886 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
12887 THUMB_VARIANT, do_##ae, do_##te }
12889 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
12890 a T_MNEM_xyz enumerator. */
12891 #define TCE(mnem, aop, top, nops, ops, ae, te) \
12892 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
12893 #define tCE(mnem, aop, top, nops, ops, ae, te) \
12894 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
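/* As an illustration (ARM_VARIANT and THUMB_VARIANT are whatever happens to
   be #defined at that point in the table; &arm_ext_v1 and &arm_ext_v4t for
   this particular entry), the line
     tCE(and,  0000000, and,   3, (RR, oRR, SH), arit, t_arit3c)
   below expands to
     { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000, T_MNEM_and,
       &arm_ext_v1, &arm_ext_v4t, do_arit, do_t_arit3c }.  */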
12896 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
12897 infix after the third character. */
12898 #define TxC3(mnem, op, top, nops, ops, ae, te) \
12899 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
12900 THUMB_VARIANT, do_##ae, do_##te }
12901 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
12902 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
12903 THUMB_VARIANT, do_##ae, do_##te }
12904 #define TC3(mnem, aop, top, nops, ops, ae, te) \
12905 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
12906 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
12907 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
12908 #define tC3(mnem, aop, top, nops, ops, ae, te) \
12909 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12910 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
12911 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12913 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
12914 appear in the opcode table. */
12915 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
12916 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12917 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
12919 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
12920 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
12921 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
12922 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
12923 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
12924 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
12925 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
12926 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
12927 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
12928 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
12929 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
12930 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
12931 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
12932 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
12933 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
12934 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
12935 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
12936 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
12937 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
12938 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
12940 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
12941 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
12942 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
12943 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
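/* To make the tag arithmetic concrete: TxCM expands its mnemonic once per
   condition, so tCM(ld,sh, ...) further down produces "ldsh", "ldeqsh",
   "ldnesh" and so on.  The bare "ldsh" gets OT_odd_infix_unc (sizeof ("")
   is 1); for "ldeqsh", sizeof ("ld") - 1 is 2, giving OT_odd_infix_0 + 2,
   which tells opcode_lookup that the condition sits at character index 2,
   exactly as described for OT_odd_infix_0 above.  */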
12945 /* Mnemonic that cannot be conditionalized. The ARM condition-code
12946 field is still 0xE. Many of the Thumb variants can be executed
12947 conditionally, so this is checked separately. */
12948 #define TUE(mnem, op, top, nops, ops, ae, te) \
12949 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
12950 THUMB_VARIANT, do_##ae, do_##te }
12952 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
12953 condition code field. */
12954 #define TUF(mnem, op, top, nops, ops, ae, te) \
12955 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
12956 THUMB_VARIANT, do_##ae, do_##te }
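/* Cross-reference with md_assemble above: in the ARM encoding path an
   OT_unconditional entry still has inst.cond (COND_ALWAYS, i.e. 0xE) merged
   into bits 28-31, whereas an OT_unconditionalF entry has 0xF ORed in
   instead.  */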
12958 /* ARM-only variants of all the above. */
12959 #define CE(mnem, op, nops, ops, ae) \
12960 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12962 #define C3(mnem, op, nops, ops, ae) \
12963 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12965 /* Legacy mnemonics that always have conditional infix after the third character. */
12967 #define CL(mnem, op, nops, ops, ae) \
12968 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12969 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12971 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
12972 #define cCE(mnem, op, nops, ops, ae) \
12973 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
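/* The 0xe##op trick supplies the Thumb-2 encoding directly: for example,
   cCE(wfs, e200110, 1, (RR), rd) further down yields ARM opcode value
   0x0e200110 and Thumb-2 value 0xee200110, i.e. the same encoding with the
   "always" condition 0xE fixed in the top nibble.  */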
12975 /* Legacy coprocessor instructions where conditional infix and conditional
12976 suffix are ambiguous. For consistency this includes all FPA instructions,
12977 not just the potentially ambiguous ones. */
12978 #define cCL(mnem, op, nops, ops, ae) \
12979 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12980 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12982 /* Coprocessor, takes either a suffix or a position-3 infix
12983 (for an FPA corner case). */
12984 #define C3E(mnem, op, nops, ops, ae) \
12985 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
12986 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12988 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
12989 { #m1 #m2 #m3, OPS##nops ops, \
12990 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12991 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12993 #define CM(m1, m2, op, nops, ops, ae) \
12994 xCM_(m1, , m2, op, nops, ops, ae), \
12995 xCM_(m1, eq, m2, op, nops, ops, ae), \
12996 xCM_(m1, ne, m2, op, nops, ops, ae), \
12997 xCM_(m1, cs, m2, op, nops, ops, ae), \
12998 xCM_(m1, hs, m2, op, nops, ops, ae), \
12999 xCM_(m1, cc, m2, op, nops, ops, ae), \
13000 xCM_(m1, ul, m2, op, nops, ops, ae), \
13001 xCM_(m1, lo, m2, op, nops, ops, ae), \
13002 xCM_(m1, mi, m2, op, nops, ops, ae), \
13003 xCM_(m1, pl, m2, op, nops, ops, ae), \
13004 xCM_(m1, vs, m2, op, nops, ops, ae), \
13005 xCM_(m1, vc, m2, op, nops, ops, ae), \
13006 xCM_(m1, hi, m2, op, nops, ops, ae), \
13007 xCM_(m1, ls, m2, op, nops, ops, ae), \
13008 xCM_(m1, ge, m2, op, nops, ops, ae), \
13009 xCM_(m1, lt, m2, op, nops, ops, ae), \
13010 xCM_(m1, gt, m2, op, nops, ops, ae), \
13011 xCM_(m1, le, m2, op, nops, ops, ae), \
13012 xCM_(m1, al, m2, op, nops, ops, ae)
13014 #define UE(mnem, op, nops, ops, ae) \
13015 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13017 #define UF(mnem, op, nops, ops, ae) \
13018 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13020 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
13021 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
13022 use the same encoding function for each. */
13023 #define NUF(mnem, op, nops, ops, enc) \
13024 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
13025 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13027 /* Neon data processing, version which indirects through neon_enc_tab for
13028 the various overloaded versions of opcodes. */
13029 #define nUF(mnem, op, nops, ops, enc) \
13030 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
13031 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13033 /* Neon insn with conditional suffix for the ARM version, non-overloaded types. */
13035 #define NCE(mnem, op, nops, ops, enc) \
13036 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT, \
13037 THUMB_VARIANT, do_##enc, do_##enc }
13039 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
13040 #define nCE(mnem, op, nops, ops, enc) \
13041 { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op, \
13042 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
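/* Note that nUF and nCE store an N_MNEM_xxx enumerator, not a literal
   encoding, in both opcode value fields; the shared do_<enc> handler is
   then expected to select the concrete encoding (via neon_enc_tab, per the
   comment above) once the operand types are known.  */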
13046 /* Thumb-only, unconditional. */
13047 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
13049 static const struct asm_opcode insns[] =
13051 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
13052 #define THUMB_VARIANT &arm_ext_v4t
13053 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
13054 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
13055 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
13056 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
13057 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
13058 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
13059 tCE(add, 0800000, add, 3, (RR, oRR, SH), arit, t_add_sub),
13060 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
13061 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
13062 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
13063 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
13064 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
13065 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
13066 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
13067 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
13068 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
13070 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
13071 for setting PSR flag bits. They are obsolete in V6 and do not
13072 have Thumb equivalents. */
13073 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13074 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13075 CL(tstp, 110f000, 2, (RR, SH), cmp),
13076 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13077 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13078 CL(cmpp, 150f000, 2, (RR, SH), cmp),
13079 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13080 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13081 CL(cmnp, 170f000, 2, (RR, SH), cmp),
13083 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
13084 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
13085 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
13086 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
13088 tCE(ldr, 4100000, ldr, 2, (RR, ADDR), ldst, t_ldst),
13089 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
13090 tCE(str, 4000000, str, 2, (RR, ADDR), ldst, t_ldst),
13091 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),
13093 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13094 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13095 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13096 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13097 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13098 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13100 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
13101 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
13102 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
13103 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
13106 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
13107 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
13108 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
13110 /* Thumb-compatibility pseudo ops. */
13111 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
13112 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
13113 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
13114 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
13115 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
13116 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
13117 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
13118 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
13119 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
13120 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
13121 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
13122 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
13124 #undef THUMB_VARIANT
13125 #define THUMB_VARIANT &arm_ext_v6
13126 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
13128 /* V1 instructions with no Thumb analogue prior to V6T2. */
13129 #undef THUMB_VARIANT
13130 #define THUMB_VARIANT &arm_ext_v6t2
13131 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
13132 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
13133 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13134 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13135 CL(teqp, 130f000, 2, (RR, SH), cmp),
13137 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
13138 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
13139 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
13140 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
13142 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13143 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13145 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13146 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13148 /* V1 instructions with no Thumb analogue at all. */
13149 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
13150 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
13152 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
13153 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
13154 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
13155 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
13156 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
13157 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
13158 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
13159 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
13162 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
13163 #undef THUMB_VARIANT
13164 #define THUMB_VARIANT &arm_ext_v4t
13165 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13166 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13168 #undef THUMB_VARIANT
13169 #define THUMB_VARIANT &arm_ext_v6t2
13170 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13171 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
13173 /* Generic coprocessor instructions. */
13174 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13175 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13176 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13177 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13178 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13179 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13180 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13183 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
13184 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13185 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13188 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
13189 TCE(mrs, 10f0000, f3ef8000, 2, (RR, PSR), mrs, t_mrs),
13190 TCE(msr, 120f000, f3808000, 2, (PSR, RR_EXi), msr, t_msr),
13193 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
13194 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13195 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13196 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13197 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13198 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13199 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13200 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13201 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13204 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
13205 #undef THUMB_VARIANT
13206 #define THUMB_VARIANT &arm_ext_v4t
13207 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDR), ldstv4, t_ldst),
13208 tC3(strh, 00000b0, strh, 2, (RR, ADDR), ldstv4, t_ldst),
13209 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13210 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13211 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13212 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13215 #define ARM_VARIANT &arm_ext_v4t_5
13216 /* ARM Architecture 4T. */
13217 /* Note: bx (and blx) are required on V5, even if the processor does
13218 not support Thumb. */
13219 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
13222 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13223 #undef THUMB_VARIANT
13224 #define THUMB_VARIANT &arm_ext_v5t
13225 /* Note: blx has 2 variants; the .value coded here is for
13226 BLX(2). Only this variant has conditional execution. */
13227 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
13228 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
13230 #undef THUMB_VARIANT
13231 #define THUMB_VARIANT &arm_ext_v6t2
13232 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
13233 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13234 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13235 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13236 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13237 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13238 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13239 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13242 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13243 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13244 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13245 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13246 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13248 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13249 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13251 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13252 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13253 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13254 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13256 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13257 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13258 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13259 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13261 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13262 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13264 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13265 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13266 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13267 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13270 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
13271 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
13272 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13273 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13275 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13276 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13279 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
13280 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
13283 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
13284 #undef THUMB_VARIANT
13285 #define THUMB_VARIANT &arm_ext_v6
13286 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
13287 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
13288 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13289 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13290 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13291 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13292 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13293 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13294 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13295 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
13297 #undef THUMB_VARIANT
13298 #define THUMB_VARIANT &arm_ext_v6t2
13299 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
13300 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13301 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13303 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
13304 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
13306 /* ARM V6 not included in V7M (e.g. integer SIMD). */
13307 #undef THUMB_VARIANT
13308 #define THUMB_VARIANT &arm_ext_v6_notm
13309 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
13310 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
13311 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
13312 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13313 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13314 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13315 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13316 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13317 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13318 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13319 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13320 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13321 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13322 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13323 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13324 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13325 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13326 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13327 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13328 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13329 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13330 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13331 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13332 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13333 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13334 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13335 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13336 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13337 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13338 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13339 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13340 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13341 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13342 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13343 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13344 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13345 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13346 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13347 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13348 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13349 UF(rfeib, 9900a00, 1, (RRw), rfe),
13350 UF(rfeda, 8100a00, 1, (RRw), rfe),
13351 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13352 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13353 UF(rfefa, 9900a00, 1, (RRw), rfe),
13354 UF(rfeea, 8100a00, 1, (RRw), rfe),
13355 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13356 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13357 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13358 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13359 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13360 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13361 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13362 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13363 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13364 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13365 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13366 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13367 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13368 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13369 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13370 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13371 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13372 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13373 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13374 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13375 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13376 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13377 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13378 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13379 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13380 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13381 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13382 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13383 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
13384 UF(srsib, 9cd0500, 1, (I31w), srs),
13385 UF(srsda, 84d0500, 1, (I31w), srs),
13386 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
13387 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
13388 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
13389 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
13390 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13391 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13392 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
13395 #define ARM_VARIANT &arm_ext_v6k
13396 #undef THUMB_VARIANT
13397 #define THUMB_VARIANT &arm_ext_v6k
13398 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
13399 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
13400 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
13401 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
13403 #undef THUMB_VARIANT
13404 #define THUMB_VARIANT &arm_ext_v6_notm
13405 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
13406 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
13408 #undef THUMB_VARIANT
13409 #define THUMB_VARIANT &arm_ext_v6t2
13410 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13411 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13412 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13413 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13414 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
13417 #define ARM_VARIANT &arm_ext_v6z
13418 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
13421 #define ARM_VARIANT &arm_ext_v6t2
13422 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
13423 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
13424 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13425 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13427 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13428 TCE(movw, 3000000, f2400000, 2, (RRnpc, Iffff), mov16, t_mov16),
13429 TCE(movt, 3400000, f2c00000, 2, (RRnpc, Iffff), mov16, t_mov16),
13430 TCE(rbit, 3ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
13432 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13433 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13434 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13435 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13437 UT(cbnz, b900, 2, (RR, EXP), t_czb),
13438 UT(cbz, b100, 2, (RR, EXP), t_czb),
13439 /* ARM does not really have an IT instruction. */
13440 TUE(it, 0, bf08, 1, (COND), it, t_it),
13441 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
13442 TUE(ite, 0, bf04, 1, (COND), it, t_it),
13443 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
13444 TUE(itet, 0, bf06, 1, (COND), it, t_it),
13445 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
13446 TUE(itee, 0, bf02, 1, (COND), it, t_it),
13447 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
13448 TUE(itett, 0, bf07, 1, (COND), it, t_it),
13449 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
13450 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
13451 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
13452 TUE(itete, 0, bf05, 1, (COND), it, t_it),
13453 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
13454 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
13456 /* Thumb2 only instructions. */
13458 #define ARM_VARIANT NULL
13460 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13461 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13462 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
13463 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
13465 /* Thumb-2 hardware division instructions (R and M profiles only). */
13466 #undef THUMB_VARIANT
13467 #define THUMB_VARIANT &arm_ext_div
13468 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
13469 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
13471 /* ARM V7 instructions. */
13473 #define ARM_VARIANT &arm_ext_v7
13474 #undef THUMB_VARIANT
13475 #define THUMB_VARIANT &arm_ext_v7
13476 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
13477 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
13478 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
13479 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
13480 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
13483 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
13484 cCE(wfs, e200110, 1, (RR), rd),
13485 cCE(rfs, e300110, 1, (RR), rd),
13486 cCE(wfc, e400110, 1, (RR), rd),
13487 cCE(rfc, e500110, 1, (RR), rd),
13489 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
13490 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
13491 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
13492 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
13494 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
13495 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
13496 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
13497 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
13499 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
13500 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
13501 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
13502 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
13503 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
13504 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
13505 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
13506 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
13507 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
13508 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
13509 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
13510 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
13512 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
13513 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
13514 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
13515 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
13516 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
13517 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
13518 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
13519 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
13520 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
13521 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
13522 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
13523 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
13525 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
13526 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
13527 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
13528 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
13529 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
13530 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
13531 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
13532 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
13533 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
13534 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
13535 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
13536 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
13538 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
13539 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
13540 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
13541 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
13542 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
13543 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
13544 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
13545 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
13546 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
13547 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
13548 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
13549 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
13551 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
13552 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
13553 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
13554 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
13555 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
13556 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
13557 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
13558 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
13559 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
13560 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
13561 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
13562 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
13564 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
13565 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
13566 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
13567 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
13568 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
13569 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
13570 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
13571 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
13572 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
13573 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
13574 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
13575 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
13577 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
13578 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
13579 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
13580 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
13581 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
13582 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
13583 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
13584 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
13585 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
13586 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
13587 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
13588 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
13590 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
13591 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
13592 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
13593 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
13594 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
13595 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
13596 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
13597 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
13598 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
13599 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
13600 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
13601 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
13603 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
13604 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
13605 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
13606 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
13607 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
13608 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
13609 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
13610 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
13611 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
13612 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
13613 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
13614 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
13616 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
13617 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
13618 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
13619 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
13620 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
13621 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
13622 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
13623 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
13624 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
13625 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
13626 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
13627 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
13629 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
13630 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
13631 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
13632 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
13633 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
13634 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
13635 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
13636 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
13637 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
13638 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
13639 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
13640 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
13642 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
13643 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
13644 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
13645 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
13646 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
13647 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
13648 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
13649 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
13650 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
13651 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
13652 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
13653 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
13655 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
13656 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
13657 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
13658 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
13659 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
13660 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
13661 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
13662 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
13663 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
13664 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
13665 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
13666 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
13668 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
13669 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
13670 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
13671 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
13672 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
13673 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
13674 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
13675 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
13676 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
13677 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
13678 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
13679 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
13681 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
13682 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
13683 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
13684 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
13685 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
13686 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
13687 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
13688 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
13689 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
13690 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
13691 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
13692 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
13694 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
13695 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
13696 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
13697 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
13698 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
13699 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
13700 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
13701 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
13702 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
13703 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
13704 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
13705 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
13707 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
13708 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
13709 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
13710 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
13711 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
13712 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13713 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13714 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13715 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
13716 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
13717 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
13718 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
13720 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
13721 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
13722 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
13723 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
13724 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
13725 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13726 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13727 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13728 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
13729 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
13730 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
13731 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
13733 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
13734 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
13735 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
13736 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
13737 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
13738 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13739 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13740 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13741 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
13742 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
13743 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
13744 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
13746 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
13747 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
13748 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
13749 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
13750 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
13751 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13752 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13753 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13754 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
13755 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
13756 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
13757 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
13759 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
13760 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
13761 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
13762 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
13763 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
13764 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13765 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13766 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13767 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
13768 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
13769 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
13770 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
13772 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
13773 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
13774 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
13775 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
13776 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
13777 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13778 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13779 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13780 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
13781 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
13782 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
13783 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
13785 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
13786 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
13787 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
13788 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
13789 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
13790 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13791 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13792 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13793 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
13794 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
13795 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
13796 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
13798 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
13799 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
13800 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
13801 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
13802 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
13803 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13804 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13805 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13806 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
13807 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
13808 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
13809 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
13811 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
13812 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
13813 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
13814 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
13815 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
13816 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13817 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13818 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13819 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
13820 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
13821 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
13822 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
13824 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
13825 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
13826 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
13827 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
13828 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
13829 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13830 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13831 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13832 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
13833 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
13834 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
13835 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
13837 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13838 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13839 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13840 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13841 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13842 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13843 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13844 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13845 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13846 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13847 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13848 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13850 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13851 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13852 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13853 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13854 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13855 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13856 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13857 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13858 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13859 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13860 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13861 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13863 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13864 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13865 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13866 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13867 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13868 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13869 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13870 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13871 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13872 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13873 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13874 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13876 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
13877 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
13878 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
13879 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
13881 cCL(flts, e000110, 2, (RF, RR), rn_rd),
13882 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
13883 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
13884 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
13885 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
13886 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
13887 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
13888 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
13889 cCL(flte, e080110, 2, (RF, RR), rn_rd),
13890 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
13891 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
13892 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
13894 /* The implementation of the FIX instruction is broken on some
13895 assemblers, in that it accepts a precision specifier as well as a
13896 rounding specifier, despite the fact that this is meaningless.
13897 To be more compatible, we accept it as well, though of course it
13898 does not set any bits. */
13899 cCE(fix, e100110, 2, (RR, RF), rd_rm),
13900 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
13901 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
13902 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
13903 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
13904 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
13905 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
13906 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
13907 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
13908 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
13909 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
13910 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
13911 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
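/* Illustration (added; not part of the original table): because the
   precision letter sets no bits, "fixz r0, f1" and "fixsz r0, f1"
   both assemble to the e100170 encoding listed above -- only the
   rounding letter (p/m/z) affects the generated opcode.  */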
13913 /* Instructions that were new with the real FPA, call them V2. */
13915 #define ARM_VARIANT &fpu_fpa_ext_v2
13916 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13917 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13918 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13919 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13920 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13921 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13924 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
13925 /* Moves and type conversions. */
13926 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
13927 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
13928 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
13929 cCE(fmstat, ef1fa10, 0, (), noargs),
13930 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
13931 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
13932 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
13933 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
13934 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
13935 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
13936 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
13937 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
13939 /* Memory operations. */
13940 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
13941 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
13942 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13943 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13944 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13945 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13946 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13947 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13948 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13949 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13950 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13951 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13952 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13953 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13954 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13955 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13956 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13957 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13959 /* Monadic operations. */
13960 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
13961 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
13962 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
13964 /* Dyadic operations. */
13965 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13966 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13967 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13968 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13969 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13970 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13971 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13972 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13973 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13976 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
13977 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
13978 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
13979 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
13982 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
13983 /* Moves and type conversions. */
13984 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13985 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13986 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13987 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
13988 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
13989 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
13990 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
13991 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13992 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
13993 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13994 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13995 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13996 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13998 /* Memory operations. */
13999 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
14000 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
14001 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14002 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14003 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14004 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14005 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14006 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14007 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14008 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14010 /* Monadic operations. */
14011 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14012 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14013 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14015 /* Dyadic operations. */
14016 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14017 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14018 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14019 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14020 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14021 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14022 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14023 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14024 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14027 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14028 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
14029 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14030 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
14033 #define ARM_VARIANT &fpu_vfp_ext_v2
14034 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
14035 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
14036 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
14037 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
14039 #undef THUMB_VARIANT
14040 #define THUMB_VARIANT &fpu_neon_ext_v1
14042 #define ARM_VARIANT &fpu_neon_ext_v1
14043 /* Data processing with three registers of the same length. */
14044 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
14045 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
14046 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
14047 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14048 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14049 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14050 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14051 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14052 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14053 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
14054 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14055 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14056 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14057 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14058 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14059 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14060 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14061 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14062 /* If not immediate, fall back to neon_dyadic_i64_su.
14063 shl_imm should accept I8 I16 I32 I64,
14064 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
14065 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
14066 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
14067 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
14068 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
14069 /* Logic ops, types optional & ignored. */
14070 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
14071 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
14072 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
14073 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
14074 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
14075 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
14076 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
14077 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
14078 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
14079 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
14080 /* Bitfield ops, untyped. */
14081 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14082 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14083 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14084 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14085 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14086 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14087 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
14088 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14089 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14090 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14091 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14092 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14093 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14094 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
14095 back to neon_dyadic_if_su. */
14096 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14097 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14098 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14099 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14100 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14101 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14102 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14103 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14104 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
14105 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
14106 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
14107 /* As above, D registers only. */
14108 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14109 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14110 /* Int and float variants, signedness unimportant. */
14111 /* If not scalar, fall back to neon_dyadic_if_i. */
14112 nUF(vmla, vmla, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14113 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14114 nUF(vmls, vmls, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14115 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14116 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
14117 /* Add/sub take types I8 I16 I32 I64 F32. */
14118 nUF(vadd, vadd, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14119 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14120 nUF(vsub, vsub, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14121 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14122 /* vtst takes sizes 8, 16, 32. */
14123 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
14124 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
14125 /* VMUL takes I8 I16 I32 F32 P8. */
14126 nUF(vmul, vmul, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mul),
14127 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
14128 /* VQD{R}MULH takes S16 S32. */
14129 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14130 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14131 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14132 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14133 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14134 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14135 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14136 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14137 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14138 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14139 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14140 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14141 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14142 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14143 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14144 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14146 /* Two address, int/float. Types S8 S16 S32 F32. */
14147 NUF(vabs, 1b10300, 2, (RNDQ, RNDQ), neon_abs_neg),
14148 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
14149 NUF(vneg, 1b10380, 2, (RNDQ, RNDQ), neon_abs_neg),
14150 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
14152 /* Data processing with two registers and a shift amount. */
14153 /* Right shifts, and variants with rounding.
14154 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
14155 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14156 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14157 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14158 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14159 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14160 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14161 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14162 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14163 /* Shift and insert. Sizes accepted 8 16 32 64. */
14164 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
14165 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
14166 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
14167 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
14168 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
14169 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
14170 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
14171 /* Right shift immediate, saturating & narrowing, with rounding variants.
14172 Types accepted S16 S32 S64 U16 U32 U64. */
14173 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14174 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14175 /* As above, unsigned. Types accepted S16 S32 S64. */
14176 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14177 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14178 /* Right shift narrowing. Types accepted I16 I32 I64. */
14179 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14180 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14181 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
14182 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
14183 /* CVT with optional immediate for fixed-point variant. */
14184 nUF(vcvt, vcvt, 3, (RNDQ, RNDQ, oI32b), neon_cvt),
14185 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
14187 /* One register and an immediate value. All encoding special-cased! */
14188 #undef THUMB_VARIANT
14189 #define THUMB_VARIANT &fpu_vfp_ext_v1
14191 #define ARM_VARIANT &fpu_vfp_ext_v1
14192 NCE(vmov, 0, 1, (VMOV), neon_mov),
14194 #undef THUMB_VARIANT
14195 #define THUMB_VARIANT &fpu_neon_ext_v1
14197 #define ARM_VARIANT &fpu_neon_ext_v1
14198 NCE(vmovq, 0, 1, (VMOV), neon_mov),
14199 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
14200 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
14202 /* Data processing, three registers of different lengths. */
14203 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
14204 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
14205 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
14206 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
14207 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
14208 /* If not scalar, fall back to neon_dyadic_long.
14209 Vector types as above, scalar types S16 S32 U16 U32. */
14210 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14211 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14212 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
14213 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14214 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14215 /* Dyadic, narrowing insns. Types I16 I32 I64. */
14216 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14217 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14218 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14219 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14220 /* Saturating doubling multiplies. Types S16 S32. */
14221 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14222 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14223 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14224 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
14225 S16 S32 U16 U32. */
14226 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
14228 /* Extract. Size 8. */
14229 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
14230 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
14232 /* Two registers, miscellaneous. */
14233 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
14234 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
14235 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
14236 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
14237 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
14238 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
14239 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
14240 /* Vector replicate. Sizes 8 16 32. */
14241 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
14242 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
14243 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
14244 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
14245 /* VMOVN. Types I16 I32 I64. */
14246 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
14247 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
14248 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
14249 /* VQMOVUN. Types S16 S32 S64. */
14250 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
14251 /* VZIP / VUZP. Sizes 8 16 32. */
14252 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
14253 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
14254 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
14255 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
14256 /* VQABS / VQNEG. Types S8 S16 S32. */
14257 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14258 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
14259 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14260 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
14261 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
14262 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
14263 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
14264 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
14265 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
14266 /* Reciprocal estimates. Types U32 F32. */
14267 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
14268 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
14269 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
14270 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
14271 /* VCLS. Types S8 S16 S32. */
14272 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
14273 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
14274 /* VCLZ. Types I8 I16 I32. */
14275 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
14276 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
14277 /* VCNT. Size 8. */
14278 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
14279 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
14280 /* Two address, untyped. */
14281 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
14282 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
14283 /* VTRN. Sizes 8 16 32. */
14284 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
14285 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
14287 /* Table lookup. Size 8. */
14288 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14289 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14291 #undef THUMB_VARIANT
14292 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
14294 #define ARM_VARIANT &fpu_vfp_ext_v1xd
14296 /* Load/store instructions. Available in Neon or VFPv3. */
14297 NCE(vldm, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14298 NCE(vldmia, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14299 NCE(vldmdb, d100b00, 2, (RRw, NRDLST), neon_ldm_stm),
14300 NCE(vstm, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14301 NCE(vstmia, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14302 NCE(vstmdb, d000b00, 2, (RRw, NRDLST), neon_ldm_stm),
14303 NCE(vldr, d100b00, 2, (RND, ADDR), neon_ldr_str),
14304 NCE(vstr, d000b00, 2, (RND, ADDR), neon_ldr_str),
14306 #undef THUMB_VARIANT
14307 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
14309 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
14311 /* Neon element/structure load/store. */
14312 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14313 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14314 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14315 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14316 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14317 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14318 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14319 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14321 #undef THUMB_VARIANT
14322 #define THUMB_VARIANT &fpu_vfp_ext_v3
14324 #define ARM_VARIANT &fpu_vfp_ext_v3
14326 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
14327 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
14328 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14329 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14330 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14331 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14332 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14333 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14334 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14335 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14336 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14337 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14338 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14339 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14340 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14341 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14342 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14343 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14345 #undef THUMB_VARIANT
14347 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
14348 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14349 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14350 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14351 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14352 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14353 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14354 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
14355 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
14358 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
14359 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
14360 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
14361 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
14362 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
14363 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
14364 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
14365 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
14366 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
14367 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
14368 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14369 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14370 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14371 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14372 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14373 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14374 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14375 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14376 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14377 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
14378 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
14379 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14380 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14381 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14382 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14383 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14384 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14385 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
14386 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
14387 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
14388 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
14389 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
14390 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
14391 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
14392 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
14393 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
14394 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
14395 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
14396 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14397 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14398 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14399 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14400 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14401 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14402 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14403 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14404 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14405 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
14406 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14407 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14408 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14409 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14410 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14411 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14412 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14413 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14414 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14415 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14416 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14417 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14418 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14419 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14420 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14421 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14422 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14423 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14424 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14425 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14426 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14427 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14428 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14429 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14430 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14431 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14432 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14433 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14434 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14435 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14436 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14437 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14438 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14439 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14440 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14441 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14442 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14443 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14444 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14445 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14446 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14447 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
14448 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14449 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14450 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14451 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14452 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14453 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14454 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14455 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14456 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14457 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14458 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14459 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14460 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14461 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14462 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14463 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14464 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14465 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14466 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14467 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14468 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14469 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
14470 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14471 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14472 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14473 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14474 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14475 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14476 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14477 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14478 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14479 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14480 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14481 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14482 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14483 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14484 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14485 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14486 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14487 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14488 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14489 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14490 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14491 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14492 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14493 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14494 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14495 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14496 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14497 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14498 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14499 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14500 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14501 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
14502 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
14503 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
14504 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
14505 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
14506 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
14507 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14508 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14509 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14510 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
14511 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
14512 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
14513 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
14514 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
14515 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
14516 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14517 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14518 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14519 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14520 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
14523 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
14524 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
14525 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
14526 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
14527 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
14528 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
14529 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
14530 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
14531 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
14532 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
14533 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
14534 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
14535 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
14536 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
14537 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
14538 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
14539 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
14540 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
14541 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
14542 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
14543 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
14544 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
14545 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
14546 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
14547 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
14548 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
14549 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
14550 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
14551 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
14552 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
14553 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
14554 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
14555 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
14556 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
14557 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
14558 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
14559 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
14560 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
14561 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
14562 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
14563 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
14564 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
14565 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
14566 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
14567 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
14568 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
14569 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
14570 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
14571 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
14572 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
14573 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
14574 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
14575 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
14576 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
14577 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
14578 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
14579 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
14580 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
14581 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
14582 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
14583 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
14584 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
14585 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
14586 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
14587 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
14588 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14589 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14590 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14591 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14592 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14593 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14594 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14595 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14596 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14597 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14598 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14599 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14602 #undef THUMB_VARIANT
14629 /* MD interface: bits in the object file. */
14631 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14632 for use in the a.out file, and store them in the array pointed to by buf.
14633 This knows about the endian-ness of the target machine and does
14634 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
14635 2 (short) and 4 (long). Floating numbers are put out as a series of
14636 LITTLENUMS (shorts, here at least). */
14639 md_number_to_chars (char * buf, valueT val, int n)
14641 if (target_big_endian)
14642 number_to_chars_bigendian (buf, val, n);
14644 number_to_chars_littleendian (buf, val, n);
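/* Worked example (added for illustration): on a little-endian target,
   md_number_to_chars (buf, 0xe1a00000, 4) stores the bytes
   00 00 a0 e1 -- the same byte order used for arm_noop in
   arm_handle_align below; a big-endian target would get e1 a0 00 00.  */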
14648 md_chars_to_number (char * buf, int n)
14651 unsigned char * where = (unsigned char *) buf;
14653 if (target_big_endian)
14658 result |= (*where++ & 255);
14666 result |= (where[n] & 255);
14673 /* MD interface: Sections. */
14675 /* Estimate the size of a frag before relaxing. Assume everything fits in
14679 md_estimate_size_before_relax (fragS * fragp,
14680 segT segtype ATTRIBUTE_UNUSED)
14686 /* Convert a machine dependent frag. */
14689 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
14691 unsigned long insn;
14692 unsigned long old_op;
14700 buf = fragp->fr_literal + fragp->fr_fix;
14702 old_op = bfd_get_16(abfd, buf);
14703 if (fragp->fr_symbol) {
14704 exp.X_op = O_symbol;
14705 exp.X_add_symbol = fragp->fr_symbol;
14707 exp.X_op = O_constant;
14709 exp.X_add_number = fragp->fr_offset;
14710 opcode = fragp->fr_subtype;
14713 case T_MNEM_ldr_pc:
14714 case T_MNEM_ldr_pc2:
14715 case T_MNEM_ldr_sp:
14716 case T_MNEM_str_sp:
14723 if (fragp->fr_var == 4)
14725 insn = THUMB_OP32(opcode);
14726 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
14728 insn |= (old_op & 0x700) << 4;
14732 insn |= (old_op & 7) << 12;
14733 insn |= (old_op & 0x38) << 13;
14735 insn |= 0x00000c00;
14736 put_thumb32_insn (buf, insn);
14737 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
14741 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
14743 pc_rel = (opcode == T_MNEM_ldr_pc2);
14746 if (fragp->fr_var == 4)
14748 insn = THUMB_OP32 (opcode);
14749 insn |= (old_op & 0xf0) << 4;
14750 put_thumb32_insn (buf, insn);
14751 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
14755 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14756 exp.X_add_number -= 4;
14764 if (fragp->fr_var == 4)
14766 int r0off = (opcode == T_MNEM_mov
14767 || opcode == T_MNEM_movs) ? 0 : 8;
14768 insn = THUMB_OP32 (opcode);
14769 insn = (insn & 0xe1ffffff) | 0x10000000;
14770 insn |= (old_op & 0x700) << r0off;
14771 put_thumb32_insn (buf, insn);
14772 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14776 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
14781 if (fragp->fr_var == 4)
14783 insn = THUMB_OP32(opcode);
14784 put_thumb32_insn (buf, insn);
14785 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
14788 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
14792 if (fragp->fr_var == 4)
14794 insn = THUMB_OP32(opcode);
14795 insn |= (old_op & 0xf00) << 14;
14796 put_thumb32_insn (buf, insn);
14797 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
14800 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
14803 case T_MNEM_add_sp:
14804 case T_MNEM_add_pc:
14805 case T_MNEM_inc_sp:
14806 case T_MNEM_dec_sp:
14807 if (fragp->fr_var == 4)
14809 /* ??? Choose between add and addw. */
14810 insn = THUMB_OP32 (opcode);
14811 insn |= (old_op & 0xf0) << 4;
14812 put_thumb32_insn (buf, insn);
14813 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14816 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14824 if (fragp->fr_var == 4)
14826 insn = THUMB_OP32 (opcode);
14827 insn |= (old_op & 0xf0) << 4;
14828 insn |= (old_op & 0xf) << 16;
14829 put_thumb32_insn (buf, insn);
14830 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14833 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14839 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
14841 fixp->fx_file = fragp->fr_file;
14842 fixp->fx_line = fragp->fr_line;
14843 fragp->fr_fix += fragp->fr_var;
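/* Added summary of the routine above: each relaxable Thumb mnemonic is
   either rewritten into its 32-bit encoding (when fr_var == 4, via
   THUMB_OP32 and put_thumb32_insn, with a T32 relocation) or left as
   the 16-bit form with the corresponding Thumb-1 relocation; in both
   cases fix_new_exp records the fixup so the offset is resolved later.  */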
14846 /* Return the size of a relaxable immediate operand instruction.
14847 SHIFT and SIZE specify the form of the allowable immediate. */
14849 relax_immediate (fragS *fragp, int size, int shift)
14855 /* ??? Should be able to do better than this. */
14856 if (fragp->fr_symbol)
14859 low = (1 << shift) - 1;
14860 mask = (1 << (shift + size)) - (1 << shift);
14861 offset = fragp->fr_offset;
14862 /* Force misaligned offsets to 32-bit variant. */
14865 if (offset & ~mask)
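/* Example (added; the numbers follow from the code above): for the
   ldr/str cases in arm_relax_frag below this is called with size == 5
   and shift == 2, giving low == 3 and mask == 0x7c, so the 2-byte
   encoding is kept only for word-aligned offsets in the range 0..124;
   anything else forces the 4-byte form.  */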
14870 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
14873 relax_adr (fragS *fragp, asection *sec)
14878 /* Assume worst case for symbols not known to be in the same section. */
14879 if (!S_IS_DEFINED(fragp->fr_symbol)
14880 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14883 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14884 addr = fragp->fr_address + fragp->fr_fix;
14885 addr = (addr + 4) & ~3;
14886 /* Fix the insn as the 4-byte version if the target address is not
14887 sufficiently aligned. This prevents an infinite loop when two
14888 instructions have contradictory range/alignment requirements. */
14892 if (val < 0 || val > 1020)
14897 /* Return the size of a relaxable add/sub immediate instruction. */
14899 relax_addsub (fragS *fragp, asection *sec)
14904 buf = fragp->fr_literal + fragp->fr_fix;
14905 op = bfd_get_16(sec->owner, buf);
14906 if ((op & 0xf) == ((op >> 4) & 0xf))
14907 return relax_immediate (fragp, 8, 0);
14909 return relax_immediate (fragp, 3, 0);
14913 /* Return the size of a relaxable branch instruction. BITS is the
14914 size of the offset field in the narrow instruction. */
14917 relax_branch (fragS *fragp, asection *sec, int bits)
14923 /* Assume worst case for symbols not known to be in the same section. */
14924 if (!S_IS_DEFINED(fragp->fr_symbol)
14925 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14928 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14929 addr = fragp->fr_address + fragp->fr_fix + 4;
14932 /* Offset is a signed value *2 */
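/* Illustrative note (added commentary, not part of the original logic):
   with BITS == 11 (narrow unconditional Thumb B) the short form reaches
   roughly +/-2KB from PC, and with BITS == 8 (narrow conditional B<cond>)
   roughly +/-256 bytes; anything outside that keeps the 4-byte Thumb-2 form.  */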
14934 if (val >= limit || val < -limit)
14940 /* Relax a machine dependent frag. This returns the amount by which
14941 the current size of the frag should change. */
14944 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
14949 oldsize = fragp->fr_var;
14950 switch (fragp->fr_subtype)
14952 case T_MNEM_ldr_pc2:
14953 newsize = relax_adr(fragp, sec);
14955 case T_MNEM_ldr_pc:
14956 case T_MNEM_ldr_sp:
14957 case T_MNEM_str_sp:
14958 newsize = relax_immediate(fragp, 8, 2);
14962 newsize = relax_immediate(fragp, 5, 2);
14966 newsize = relax_immediate(fragp, 5, 1);
14970 newsize = relax_immediate(fragp, 5, 0);
14973 newsize = relax_adr(fragp, sec);
14979 newsize = relax_immediate(fragp, 8, 0);
14982 newsize = relax_branch(fragp, sec, 11);
14985 newsize = relax_branch(fragp, sec, 8);
14987 case T_MNEM_add_sp:
14988 case T_MNEM_add_pc:
14989 newsize = relax_immediate (fragp, 8, 2);
14991 case T_MNEM_inc_sp:
14992 case T_MNEM_dec_sp:
14993 newsize = relax_immediate (fragp, 7, 2);
14999 newsize = relax_addsub (fragp, sec);
15006 fragp->fr_var = -newsize;
15007 md_convert_frag (sec->owner, sec, fragp);
15009 return -(newsize + oldsize);
15011 fragp->fr_var = newsize;
15012 return newsize - oldsize;
15015 /* Round up a section size to the appropriate boundary. */
15018 md_section_align (segT segment ATTRIBUTE_UNUSED,
15024 /* Round all sections to a multiple of 4. */
15025 return (size + 3) & ~3;
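/* Worked example (added commentary): a section of 0x101 bytes is padded
   to (0x101 + 3) & ~3 == 0x104, while an already-aligned size such as
   0x104 is returned unchanged.  */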
15029 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
15030 of an rs_align_code fragment. */
15033 arm_handle_align (fragS * fragP)
15035 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
15036 static char const thumb_noop[2] = { 0xc0, 0x46 };
15037 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
15038 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
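/* Added note: the little-endian byte sequences above correspond to the
   classic pre-ARMv6 no-ops 0xe1a00000 ("mov r0, r0") for ARM state and
   0x46c0 ("mov r8, r8") for Thumb state; the *_bigend_* arrays are the
   same instructions with the byte order swapped.  */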
15040 int bytes, fix, noop_size;
15044 if (fragP->fr_type != rs_align_code)
15047 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
15048 p = fragP->fr_literal + fragP->fr_fix;
15051 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
15052 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
15054 if (fragP->tc_frag_data)
15056 if (target_big_endian)
15057 noop = thumb_bigend_noop;
15060 noop_size = sizeof (thumb_noop);
15064 if (target_big_endian)
15065 noop = arm_bigend_noop;
15068 noop_size = sizeof (arm_noop);
15071 if (bytes & (noop_size - 1))
15073 fix = bytes & (noop_size - 1);
15074 memset (p, 0, fix);
15079 while (bytes >= noop_size)
15081 memcpy (p, noop, noop_size);
15083 bytes -= noop_size;
15087 fragP->fr_fix += fix;
15088 fragP->fr_var = noop_size;
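/* Worked example (added commentary): aligning ARM code when 7 bytes of
   padding are needed first emits 7 & (4 - 1) == 3 zero bytes and then one
   4-byte no-op; in Thumb code 7 bytes would become 1 zero byte followed by
   three 2-byte no-ops.  */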
15091 /* Called from md_do_align. Used to create an alignment
15092 frag in a code section. */
15095 arm_frag_align_code (int n, int max)
15099 /* We assume that there will never be a requirement
15100 to support alignments greater than 32 bytes. */
15101 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
15102 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
15104 p = frag_var (rs_align_code,
15105 MAX_MEM_FOR_RS_ALIGN_CODE,
15107 (relax_substateT) max,
15114 /* Perform target specific initialisation of a frag. */
15117 arm_init_frag (fragS * fragP)
15119 /* Record whether this frag is in an ARM or a THUMB area. */
15120 fragP->tc_frag_data = thumb_mode;
15124 /* When we change sections we need to issue a new mapping symbol. */
15127 arm_elf_change_section (void)
15130 segment_info_type *seginfo;
15132 /* Link an unlinked unwind index table section to the .text section. */
15133 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
15134 && elf_linked_to_section (now_seg) == NULL)
15135 elf_linked_to_section (now_seg) = text_section;
15137 if (!SEG_NORMAL (now_seg))
15140 flags = bfd_get_section_flags (stdoutput, now_seg);
15142 /* We can ignore sections that only contain debug info. */
15143 if ((flags & SEC_ALLOC) == 0)
15146 seginfo = seg_info (now_seg);
15147 mapstate = seginfo->tc_segment_info_data.mapstate;
15148 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
15152 arm_elf_section_type (const char * str, size_t len)
15154 if (len == 5 && strncmp (str, "exidx", 5) == 0)
15155 return SHT_ARM_EXIDX;
15160 /* Code to deal with unwinding tables. */
15162 static void add_unwind_adjustsp (offsetT);
15164 /* Generate any deferred unwind frame offset. */
15167 flush_pending_unwind (void)
15171 offset = unwind.pending_offset;
15172 unwind.pending_offset = 0;
15174 add_unwind_adjustsp (offset);
15177 /* Add an opcode to this list for this function. Two-byte opcodes should
15178 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
15182 add_unwind_opcode (valueT op, int length)
15184 /* Add any deferred stack adjustment. */
15185 if (unwind.pending_offset)
15186 flush_pending_unwind ();
15188 unwind.sp_restored = 0;
15190 if (unwind.opcode_count + length > unwind.opcode_alloc)
15192 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
15193 if (unwind.opcodes)
15194 unwind.opcodes = xrealloc (unwind.opcodes,
15195 unwind.opcode_alloc);
15197 unwind.opcodes = xmalloc (unwind.opcode_alloc);
15202 unwind.opcodes[unwind.opcode_count] = op & 0xff;
15204 unwind.opcode_count++;
15208 /* Add unwind opcodes to adjust the stack pointer. */
15211 add_unwind_adjustsp (offsetT offset)
15215 if (offset > 0x200)
15217 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
15222 /* Long form: 0xb2, uleb128. */
15223 /* This might not fit in a word so add the individual bytes,
15224 remembering the list is built in reverse order. */
15225 o = (valueT) ((offset - 0x204) >> 2);
15227 add_unwind_opcode (0, 1);
15229 /* Calculate the uleb128 encoding of the offset. */
15233 bytes[n] = o & 0x7f;
15239 /* Add the insn. */
15241 add_unwind_opcode (bytes[n - 1], 1);
15242 add_unwind_opcode (0xb2, 1);
15244 else if (offset > 0x100)
15246 /* Two short opcodes. */
15247 add_unwind_opcode (0x3f, 1);
15248 op = (offset - 0x104) >> 2;
15249 add_unwind_opcode (op, 1);
15251 else if (offset > 0)
15253 /* Short opcode. */
15254 op = (offset - 4) >> 2;
15255 add_unwind_opcode (op, 1);
15257 else if (offset < 0)
15260 while (offset > 0x100)
15262 add_unwind_opcode (0x7f, 1);
15265 op = ((offset - 4) >> 2) | 0x40;
15266 add_unwind_opcode (op, 1);
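/* Worked examples (added commentary, using the ARM EHABI encodings):
     offset 0x10  -> single opcode 0x03        (vsp += (3 << 2) + 4)
     offset 0x104 -> opcodes 0x3f and 0x00     (vsp += 0x100, then vsp += 4)
     offset 0x604 -> 0xb2 followed by uleb128(0x100) == { 0x80, 0x02 }
                     (vsp += 0x204 + (0x100 << 2))
   Negative adjustments use the 0x40..0x7f forms instead.  */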
15270 /* Finish the list of unwind opcodes for this function. */
15272 finish_unwind_opcodes (void)
15276 if (unwind.fp_used)
15278 /* Adjust sp as necessary. */
15279 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
15280 flush_pending_unwind ();
15282 /* After restoring sp from the frame pointer. */
15283 op = 0x90 | unwind.fp_reg;
15284 add_unwind_opcode (op, 1);
15287 flush_pending_unwind ();
15291 /* Start an exception table entry. If idx is nonzero this is an index table
15295 start_unwind_section (const segT text_seg, int idx)
15297 const char * text_name;
15298 const char * prefix;
15299 const char * prefix_once;
15300 const char * group_name;
15304 size_t sec_name_len;
15311 prefix = ELF_STRING_ARM_unwind;
15312 prefix_once = ELF_STRING_ARM_unwind_once;
15313 type = SHT_ARM_EXIDX;
15317 prefix = ELF_STRING_ARM_unwind_info;
15318 prefix_once = ELF_STRING_ARM_unwind_info_once;
15319 type = SHT_PROGBITS;
15322 text_name = segment_name (text_seg);
15323 if (streq (text_name, ".text"))
15326 if (strncmp (text_name, ".gnu.linkonce.t.",
15327 strlen (".gnu.linkonce.t.")) == 0)
15329 prefix = prefix_once;
15330 text_name += strlen (".gnu.linkonce.t.");
15333 prefix_len = strlen (prefix);
15334 text_len = strlen (text_name);
15335 sec_name_len = prefix_len + text_len;
15336 sec_name = xmalloc (sec_name_len + 1);
15337 memcpy (sec_name, prefix, prefix_len);
15338 memcpy (sec_name + prefix_len, text_name, text_len);
15339 sec_name[prefix_len + text_len] = '\0';
15345 /* Handle COMDAT group. */
15346 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
15348 group_name = elf_group_name (text_seg);
15349 if (group_name == NULL)
15351 as_bad (_("Group section `%s' has no group signature"),
15352 segment_name (text_seg));
15353 ignore_rest_of_line ();
15356 flags |= SHF_GROUP;
15360 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
15362 /* Set the section link for index tables. */
15364 elf_linked_to_section (now_seg) = text_seg;
15368 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
15369 personality routine data. Returns zero, or the index table value for
15370 an inline entry. */
15373 create_unwind_entry (int have_data)
15378 /* The current word of data. */
15380 /* The number of bytes left in this word. */
15383 finish_unwind_opcodes ();
15385 /* Remember the current text section. */
15386 unwind.saved_seg = now_seg;
15387 unwind.saved_subseg = now_subseg;
15389 start_unwind_section (now_seg, 0);
15391 if (unwind.personality_routine == NULL)
15393 if (unwind.personality_index == -2)
15396 as_bad (_("handlerdata in cantunwind frame"));
15397 return 1; /* EXIDX_CANTUNWIND. */
15400 /* Use a default personality routine if none is specified. */
15401 if (unwind.personality_index == -1)
15403 if (unwind.opcode_count > 3)
15404 unwind.personality_index = 1;
15406 unwind.personality_index = 0;
15409 /* Space for the personality routine entry. */
15410 if (unwind.personality_index == 0)
15412 if (unwind.opcode_count > 3)
15413 as_bad (_("too many unwind opcodes for personality routine 0"));
15417 /* All the data is inline in the index table. */
15420 while (unwind.opcode_count > 0)
15422 unwind.opcode_count--;
15423 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15427 /* Pad with "finish" opcodes. */
15429 data = (data << 8) | 0xb0;
15436 /* We get two opcodes "free" in the first word. */
15437 size = unwind.opcode_count - 2;
15440 /* An extra byte is required for the opcode count. */
15441 size = unwind.opcode_count + 1;
15443 size = (size + 3) >> 2;
15445 as_bad (_("too many unwind opcodes"));
15447 frag_align (2, 0, 0);
15448 record_alignment (now_seg, 2);
15449 unwind.table_entry = expr_build_dot ();
15451 /* Allocate the table entry. */
15452 ptr = frag_more ((size << 2) + 4);
15453 where = frag_now_fix () - ((size << 2) + 4);
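/* Worked example (added commentary): with six opcodes and ABI personality
   index 1 or 2, two opcodes travel in the header word, so the size above
   works out to (6 - 2 + 3) >> 2 == 1 extra word, and frag_more just
   reserved (1 << 2) + 4 == 8 bytes for this table entry.  */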
15455 switch (unwind.personality_index)
15458 /* ??? Should this be a PLT generating relocation? */
15459 /* Custom personality routine. */
15460 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
15461 BFD_RELOC_ARM_PREL31);
15466 /* Set the first byte to the number of additional words. */
15471 /* ABI defined personality routines. */
15473 /* Three opcode bytes are packed into the first word. */
15480 /* The size and first two opcode bytes go in the first word. */
15481 data = ((0x80 + unwind.personality_index) << 8) | size;
15486 /* Should never happen. */
15490 /* Pack the opcodes into words (MSB first), reversing the list at the same
15492 while (unwind.opcode_count > 0)
15496 md_number_to_chars (ptr, data, 4);
15501 unwind.opcode_count--;
15503 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15506 /* Finish off the last word. */
15509 /* Pad with "finish" opcodes. */
15511 data = (data << 8) | 0xb0;
15513 md_number_to_chars (ptr, data, 4);
15518 /* Add an empty descriptor if there is no user-specified data. */
15519 ptr = frag_more (4);
15520 md_number_to_chars (ptr, 0, 4);
15526 /* Convert REGNAME to a DWARF-2 register number. */
15529 tc_arm_regname_to_dw2regnum (const char *regname)
15531 int reg = arm_reg_parse ((char **) ®name, REG_TYPE_RN);
15539 /* Initialize the DWARF-2 unwind information for this procedure. */
15542 tc_arm_frame_initial_instructions (void)
15544 cfi_add_CFA_def_cfa (REG_SP, 0);
15546 #endif /* OBJ_ELF */
15549 /* MD interface: Symbol and relocation handling. */
15551 /* Return the address within the segment that a PC-relative fixup is
15552 relative to. For ARM, PC-relative fixups applied to instructions
15553 are generally relative to the location of the fixup plus 8 bytes.
15554 Thumb branches are offset by 4, and Thumb loads relative to PC
15555 require special handling. */
15558 md_pcrel_from_section (fixS * fixP, segT seg)
15560 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
15562 /* If this is pc-relative and we are going to emit a relocation
15563 then we just want to put out any pipeline compensation that the linker
15564 will need. Otherwise we want to use the calculated base.
15565 For WinCE we skip the bias for externals as well, since this
15566 is how the MS ARM-CE assembler behaves and we want to be compatible. */
15568 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
15569 || (arm_force_relocation (fixP)
15571 && !S_IS_EXTERNAL (fixP->fx_addsy)
15576 switch (fixP->fx_r_type)
15578 /* PC relative addressing on the Thumb is slightly odd as the
15579 bottom two bits of the PC are forced to zero for the
15580 calculation. This happens *after* application of the
15581 pipeline offset. However, Thumb adrl already adjusts for
15582 this, so we need not do it again. */
15583 case BFD_RELOC_ARM_THUMB_ADD:
15586 case BFD_RELOC_ARM_THUMB_OFFSET:
15587 case BFD_RELOC_ARM_T32_OFFSET_IMM:
15588 case BFD_RELOC_ARM_T32_ADD_PC12:
15589 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
15590 return (base + 4) & ~3;
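/* Worked example (added commentary): for a Thumb PC-relative load whose
   fixup sits at address 0x1002, the base returned is (0x1002 + 4) & ~3
   == 0x1004, matching the architectural Align(PC, 4) rule.  */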
15592 /* Thumb branches are simply offset by +4. */
15593 case BFD_RELOC_THUMB_PCREL_BRANCH7:
15594 case BFD_RELOC_THUMB_PCREL_BRANCH9:
15595 case BFD_RELOC_THUMB_PCREL_BRANCH12:
15596 case BFD_RELOC_THUMB_PCREL_BRANCH20:
15597 case BFD_RELOC_THUMB_PCREL_BRANCH23:
15598 case BFD_RELOC_THUMB_PCREL_BRANCH25:
15599 case BFD_RELOC_THUMB_PCREL_BLX:
15602 /* ARM mode branches are offset by +8. However, the Windows CE
15603 loader expects the relocation not to take this into account. */
15604 case BFD_RELOC_ARM_PCREL_BRANCH:
15605 case BFD_RELOC_ARM_PCREL_CALL:
15606 case BFD_RELOC_ARM_PCREL_JUMP:
15607 case BFD_RELOC_ARM_PCREL_BLX:
15608 case BFD_RELOC_ARM_PLT32:
16610 /* When handling fixups immediately, because we have already
16611 discovered the value of the symbol or the address of the frag involved,
16612 we must account for the +8 offset ourselves, as the OS loader will never
16613 see the reloc; see fixup_segment() in write.c.
16614 The S_IS_EXTERNAL test handles the case of global symbols; those need
16615 the calculated base, not just the pipeline compensation the linker will apply. */
15617 && fixP->fx_addsy != NULL
15618 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
15619 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
15626 /* ARM mode loads relative to PC are also offset by +8. Unlike
15627 branches, the Windows CE loader *does* expect the relocation
15628 to take this into account. */
15629 case BFD_RELOC_ARM_OFFSET_IMM:
15630 case BFD_RELOC_ARM_OFFSET_IMM8:
15631 case BFD_RELOC_ARM_HWLITERAL:
15632 case BFD_RELOC_ARM_LITERAL:
15633 case BFD_RELOC_ARM_CP_OFF_IMM:
15637 /* Other PC-relative relocations are un-offset. */
15643 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
15644 Otherwise we have no need to default values of symbols. */
15647 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
15650 if (name[0] == '_' && name[1] == 'G'
15651 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
15655 if (symbol_find (name))
15656 as_bad ("GOT already in the symbol table");
15658 GOT_symbol = symbol_new (name, undefined_section,
15659 (valueT) 0, & zero_address_frag);
15669 /* Subroutine of md_apply_fix. Check to see if an immediate can be
15670 computed as two separate immediate values, added together. We
15671 already know that this value cannot be computed by just one ARM
15674 static unsigned int
15675 validate_immediate_twopart (unsigned int val,
15676 unsigned int * highpart)
15681 for (i = 0; i < 32; i += 2)
15682 if (((a = rotate_left (val, i)) & 0xff) != 0)
15688 * highpart = (a >> 8) | ((i + 24) << 7);
15690 else if (a & 0xff0000)
15692 if (a & 0xff000000)
15694 * highpart = (a >> 16) | ((i + 16) << 7);
15698 assert (a & 0xff000000);
15699 * highpart = (a >> 24) | ((i + 8) << 7);
15702 return (a & 0xff) | (i << 7);
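/* Worked example (added commentary): 0x0001f003 cannot be encoded as a
   single 8-bit-rotated ARM immediate (its set bits span 17 positions),
   but this routine splits it into 0x00000003 plus a high part of
   0x0001f000, each of which is a valid rotated immediate.  */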
15709 validate_offset_imm (unsigned int val, int hwse)
15711 if ((hwse && val > 255) || val > 4095)
15716 /* Subroutine of md_apply_fix. Do those data_ops which can take a
15717 negative immediate constant by altering the instruction. A bit of
15722 by inverting the second operand, and
15725 by negating the second operand. */
15728 negate_data_op (unsigned long * instruction,
15729 unsigned long value)
15732 unsigned long negated, inverted;
15734 negated = encode_arm_immediate (-value);
15735 inverted = encode_arm_immediate (~value);
15737 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
15740 /* First negates. */
15741 case OPCODE_SUB: /* ADD <-> SUB */
15742 new_inst = OPCODE_ADD;
15747 new_inst = OPCODE_SUB;
15751 case OPCODE_CMP: /* CMP <-> CMN */
15752 new_inst = OPCODE_CMN;
15757 new_inst = OPCODE_CMP;
15761 /* Now Inverted ops. */
15762 case OPCODE_MOV: /* MOV <-> MVN */
15763 new_inst = OPCODE_MVN;
15768 new_inst = OPCODE_MOV;
15772 case OPCODE_AND: /* AND <-> BIC */
15773 new_inst = OPCODE_BIC;
15778 new_inst = OPCODE_AND;
15782 case OPCODE_ADC: /* ADC <-> SBC */
15783 new_inst = OPCODE_SBC;
15788 new_inst = OPCODE_ADC;
15792 /* We cannot do anything. */
15797 if (value == (unsigned) FAIL)
15800 *instruction &= OPCODE_MASK;
15801 *instruction |= new_inst << DATA_OP_SHIFT;
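/* Worked example (added commentary): "add r0, r1, #-4" has no valid
   rotated immediate, but negating the constant and flipping ADD to SUB
   yields the encodable "sub r0, r1, #4"; MOV/MVN and AND/BIC are handled
   the same way using the inverted constant.  */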
15805 /* Like negate_data_op, but for Thumb-2. */
15807 static unsigned int
15808 thumb32_negate_data_op (offsetT *instruction, offsetT value)
15812 offsetT negated, inverted;
15814 negated = encode_thumb32_immediate (-value);
15815 inverted = encode_thumb32_immediate (~value);
15817 rd = (*instruction >> 8) & 0xf;
15818 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
15821 /* ADD <-> SUB. Includes CMP <-> CMN. */
15822 case T2_OPCODE_SUB:
15823 new_inst = T2_OPCODE_ADD;
15827 case T2_OPCODE_ADD:
15828 new_inst = T2_OPCODE_SUB;
15832 /* ORR <-> ORN. Includes MOV <-> MVN. */
15833 case T2_OPCODE_ORR:
15834 new_inst = T2_OPCODE_ORN;
15838 case T2_OPCODE_ORN:
15839 new_inst = T2_OPCODE_ORR;
15843 /* AND <-> BIC. TST has no inverted equivalent. */
15844 case T2_OPCODE_AND:
15845 new_inst = T2_OPCODE_BIC;
15852 case T2_OPCODE_BIC:
15853 new_inst = T2_OPCODE_AND;
15858 case T2_OPCODE_ADC:
15859 new_inst = T2_OPCODE_SBC;
15863 case T2_OPCODE_SBC:
15864 new_inst = T2_OPCODE_ADC;
15868 /* We cannot do anything. */
15876 *instruction &= T2_OPCODE_MASK;
15877 *instruction |= new_inst << T2_DATA_OP_SHIFT;
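/* Worked example (added commentary): "mov.w r0, #0xfffffffe" has no
   Thumb-2 modified-immediate encoding, but the inverted constant does,
   so the instruction is rewritten as "mvn.w r0, #1" (MOV/MVN are the
   ORR/ORN forms with Rn == PC).  */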
15881 /* Read a 32-bit thumb instruction from buf. */
15882 static unsigned long
15883 get_thumb32_insn (char * buf)
15885 unsigned long insn;
15886 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
15887 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15893 /* We usually want to set the low bit on the address of thumb function
15894 symbols. In particular .word foo - . should have the low bit set.
15895 Generic code tries to fold the difference of two symbols to
15896 a constant. Prevent this and force a relocation when the first symbol
15897 is a thumb function. */
15899 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
15901 if (op == O_subtract
15902 && l->X_op == O_symbol
15903 && r->X_op == O_symbol
15904 && THUMB_IS_FUNC (l->X_add_symbol))
15906 l->X_op = O_subtract;
15907 l->X_op_symbol = r->X_add_symbol;
15908 l->X_add_number -= r->X_add_number;
15911 /* Process as normal. */
15916 md_apply_fix (fixS * fixP,
15920 offsetT value = * valP;
15922 unsigned int newimm;
15923 unsigned long temp;
15925 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
15927 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
15929 /* Note whether this will delete the relocation. */
15930 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
15933 /* On a 64-bit host, silently truncate 'value' to 32 bits for
15934 consistency with the behavior on 32-bit hosts. Remember value
15936 value &= 0xffffffff;
15937 value ^= 0x80000000;
15938 value -= 0x80000000;
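/* Worked example (added commentary): on a 64-bit host a value of
   0x00000000fffffffe becomes 0x7ffffffe after the XOR and
   0xfffffffffffffffe (-2) after the subtraction, i.e. the 32-bit value
   is sign-extended to the host width.  */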
15941 fixP->fx_addnumber = value;
15943 /* Same treatment for fixP->fx_offset. */
15944 fixP->fx_offset &= 0xffffffff;
15945 fixP->fx_offset ^= 0x80000000;
15946 fixP->fx_offset -= 0x80000000;
15948 switch (fixP->fx_r_type)
15950 case BFD_RELOC_NONE:
15951 /* This will need to go in the object file. */
15955 case BFD_RELOC_ARM_IMMEDIATE:
15956 /* We claim that this fixup has been processed here,
15957 even if in fact we generate an error because we do
15958 not have a reloc for it, so tc_gen_reloc will reject it. */
15962 && ! S_IS_DEFINED (fixP->fx_addsy))
15964 as_bad_where (fixP->fx_file, fixP->fx_line,
15965 _("undefined symbol %s used as an immediate value"),
15966 S_GET_NAME (fixP->fx_addsy));
15970 newimm = encode_arm_immediate (value);
15971 temp = md_chars_to_number (buf, INSN_SIZE);
15973 /* If the instruction will fail, see if we can fix things up by
15974 changing the opcode. */
15975 if (newimm == (unsigned int) FAIL
15976 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
15978 as_bad_where (fixP->fx_file, fixP->fx_line,
15979 _("invalid constant (%lx) after fixup"),
15980 (unsigned long) value);
15984 newimm |= (temp & 0xfffff000);
15985 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
15988 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
15990 unsigned int highpart = 0;
15991 unsigned int newinsn = 0xe1a00000; /* nop. */
15993 newimm = encode_arm_immediate (value);
15994 temp = md_chars_to_number (buf, INSN_SIZE);
15996 /* If the instruction will fail, see if we can fix things up by
15997 changing the opcode. */
15998 if (newimm == (unsigned int) FAIL
15999 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
16001 /* No ? OK - try using two ADD instructions to generate
16003 newimm = validate_immediate_twopart (value, & highpart);
16005 /* Yes - then make sure that the second instruction is
16007 if (newimm != (unsigned int) FAIL)
16009 /* Still No ? Try using a negated value. */
16010 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
16011 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
16012 /* Otherwise - give up. */
16015 as_bad_where (fixP->fx_file, fixP->fx_line,
16016 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
16021 /* Replace the first operand in the 2nd instruction (which
16022 is the PC) with the destination register. We have
16023 already added in the PC in the first instruction and we
16024 do not want to do it again. */
16025 newinsn &= ~ 0xf0000;
16026 newinsn |= ((newinsn & 0x0f000) << 4);
16029 newimm |= (temp & 0xfffff000);
16030 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
16032 highpart |= (newinsn & 0xfffff000);
16033 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
16037 case BFD_RELOC_ARM_OFFSET_IMM:
16038 if (!fixP->fx_done && seg->use_rela_p)
16041 case BFD_RELOC_ARM_LITERAL:
16047 if (validate_offset_imm (value, 0) == FAIL)
16049 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
16050 as_bad_where (fixP->fx_file, fixP->fx_line,
16051 _("invalid literal constant: pool needs to be closer"));
16053 as_bad_where (fixP->fx_file, fixP->fx_line,
16054 _("bad immediate value for offset (%ld)"),
16059 newval = md_chars_to_number (buf, INSN_SIZE);
16060 newval &= 0xff7ff000;
16061 newval |= value | (sign ? INDEX_UP : 0);
16062 md_number_to_chars (buf, newval, INSN_SIZE);
16065 case BFD_RELOC_ARM_OFFSET_IMM8:
16066 case BFD_RELOC_ARM_HWLITERAL:
16072 if (validate_offset_imm (value, 1) == FAIL)
16074 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
16075 as_bad_where (fixP->fx_file, fixP->fx_line,
16076 _("invalid literal constant: pool needs to be closer"));
16078 as_bad (_("bad immediate value for half-word offset (%ld)"),
16083 newval = md_chars_to_number (buf, INSN_SIZE);
16084 newval &= 0xff7ff0f0;
16085 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
16086 md_number_to_chars (buf, newval, INSN_SIZE);
16089 case BFD_RELOC_ARM_T32_OFFSET_U8:
16090 if (value < 0 || value > 1020 || value % 4 != 0)
16091 as_bad_where (fixP->fx_file, fixP->fx_line,
16092 _("bad immediate value for offset (%ld)"), (long) value);
16095 newval = md_chars_to_number (buf+2, THUMB_SIZE);
16097 md_number_to_chars (buf+2, newval, THUMB_SIZE);
16100 case BFD_RELOC_ARM_T32_OFFSET_IMM:
16101 /* This is a complicated relocation used for all varieties of Thumb32
16102 load/store instruction with immediate offset:
16104 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16105 *4, optional writeback(W)
16106 (doubleword load/store)
16108 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16109 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16110 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16111 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16112 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16114 Uppercase letters indicate bits that are already encoded at
16115 this point. Lowercase letters are our problem. For the
16116 second block of instructions, the secondary opcode nybble
16117 (bits 8..11) is present, and bit 23 is zero, even if this is
16118 a PC-relative operation. */
16119 newval = md_chars_to_number (buf, THUMB_SIZE);
16121 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
16123 if ((newval & 0xf0000000) == 0xe0000000)
16125 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16127 newval |= (1 << 23);
16130 if (value % 4 != 0)
16132 as_bad_where (fixP->fx_file, fixP->fx_line,
16133 _("offset not a multiple of 4"));
16139 as_bad_where (fixP->fx_file, fixP->fx_line,
16140 _("offset out of range"));
16145 else if ((newval & 0x000f0000) == 0x000f0000)
16147 /* PC-relative, 12-bit offset. */
16149 newval |= (1 << 23);
16154 as_bad_where (fixP->fx_file, fixP->fx_line,
16155 _("offset out of range"));
16160 else if ((newval & 0x00000100) == 0x00000100)
16162 /* Writeback: 8-bit, +/- offset. */
16164 newval |= (1 << 9);
16169 as_bad_where (fixP->fx_file, fixP->fx_line,
16170 _("offset out of range"));
16175 else if ((newval & 0x00000f00) == 0x00000e00)
16177 /* T-instruction: positive 8-bit offset. */
16178 if (value < 0 || value > 0xff)
16180 as_bad_where (fixP->fx_file, fixP->fx_line,
16181 _("offset out of range"));
16189 /* Positive 12-bit or negative 8-bit offset. */
16193 newval |= (1 << 23);
16203 as_bad_where (fixP->fx_file, fixP->fx_line,
16204 _("offset out of range"));
16211 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
16212 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
16215 case BFD_RELOC_ARM_SHIFT_IMM:
16216 newval = md_chars_to_number (buf, INSN_SIZE);
16217 if (((unsigned long) value) > 32
16219 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
16221 as_bad_where (fixP->fx_file, fixP->fx_line,
16222 _("shift expression is too large"));
16227 /* Shifts of zero must be done as lsl. */
16229 else if (value == 32)
16231 newval &= 0xfffff07f;
16232 newval |= (value & 0x1f) << 7;
16233 md_number_to_chars (buf, newval, INSN_SIZE);
16236 case BFD_RELOC_ARM_T32_IMMEDIATE:
16237 case BFD_RELOC_ARM_T32_IMM12:
16238 case BFD_RELOC_ARM_T32_ADD_PC12:
16239 /* We claim that this fixup has been processed here,
16240 even if in fact we generate an error because we do
16241 not have a reloc for it, so tc_gen_reloc will reject it. */
16245 && ! S_IS_DEFINED (fixP->fx_addsy))
16247 as_bad_where (fixP->fx_file, fixP->fx_line,
16248 _("undefined symbol %s used as an immediate value"),
16249 S_GET_NAME (fixP->fx_addsy));
16253 newval = md_chars_to_number (buf, THUMB_SIZE);
16255 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
16257 /* FUTURE: Implement analogue of negate_data_op for T32. */
16258 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
16260 newimm = encode_thumb32_immediate (value);
16261 if (newimm == (unsigned int) FAIL)
16262 newimm = thumb32_negate_data_op (&newval, value);
16266 /* 12 bit immediate for addw/subw. */
16270 newval ^= 0x00a00000;
16273 newimm = (unsigned int) FAIL;
16278 if (newimm == (unsigned int)FAIL)
16280 as_bad_where (fixP->fx_file, fixP->fx_line,
16281 _("invalid constant (%lx) after fixup"),
16282 (unsigned long) value);
16286 newval |= (newimm & 0x800) << 15;
16287 newval |= (newimm & 0x700) << 4;
16288 newval |= (newimm & 0x0ff);
16290 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
16291 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
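/* Added note: the three ORs above scatter the 12-bit i:imm3:imm8
   modified-immediate field into its architectural positions in the
   combined 32-bit encoding: bit 11 -> bit 26 (i), bits 10..8 ->
   bits 14..12 (imm3), bits 7..0 -> bits 7..0 (imm8).  */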
16294 case BFD_RELOC_ARM_SMC:
16295 if (((unsigned long) value) > 0xffff)
16296 as_bad_where (fixP->fx_file, fixP->fx_line,
16297 _("invalid smc expression"));
16298 newval = md_chars_to_number (buf, INSN_SIZE);
16299 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
16300 md_number_to_chars (buf, newval, INSN_SIZE);
16303 case BFD_RELOC_ARM_SWI:
16304 if (fixP->tc_fix_data != 0)
16306 if (((unsigned long) value) > 0xff)
16307 as_bad_where (fixP->fx_file, fixP->fx_line,
16308 _("invalid swi expression"));
16309 newval = md_chars_to_number (buf, THUMB_SIZE);
16311 md_number_to_chars (buf, newval, THUMB_SIZE);
16315 if (((unsigned long) value) > 0x00ffffff)
16316 as_bad_where (fixP->fx_file, fixP->fx_line,
16317 _("invalid swi expression"));
16318 newval = md_chars_to_number (buf, INSN_SIZE);
16320 md_number_to_chars (buf, newval, INSN_SIZE);
16324 case BFD_RELOC_ARM_MULTI:
16325 if (((unsigned long) value) > 0xffff)
16326 as_bad_where (fixP->fx_file, fixP->fx_line,
16327 _("invalid expression in load/store multiple"));
16328 newval = value | md_chars_to_number (buf, INSN_SIZE);
16329 md_number_to_chars (buf, newval, INSN_SIZE);
16333 case BFD_RELOC_ARM_PCREL_CALL:
16334 newval = md_chars_to_number (buf, INSN_SIZE);
16335 if ((newval & 0xf0000000) == 0xf0000000)
16339 goto arm_branch_common;
16341 case BFD_RELOC_ARM_PCREL_JUMP:
16342 case BFD_RELOC_ARM_PLT32:
16344 case BFD_RELOC_ARM_PCREL_BRANCH:
16346 goto arm_branch_common;
16348 case BFD_RELOC_ARM_PCREL_BLX:
16351 /* We are going to store value (shifted right by two) in the
16352 instruction, in a 24 bit, signed field. Bits 26 through 32 either
16353 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
16354 also be clear. */
16356 as_bad_where (fixP->fx_file, fixP->fx_line,
16357 _("misaligned branch destination"));
16358 if ((value & (offsetT)0xfe000000) != (offsetT)0
16359 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
16360 as_bad_where (fixP->fx_file, fixP->fx_line,
16361 _("branch out of range"));
16363 if (fixP->fx_done || !seg->use_rela_p)
16365 newval = md_chars_to_number (buf, INSN_SIZE);
16366 newval |= (value >> 2) & 0x00ffffff;
16367 /* Set the H bit on BLX instructions. */
16371 newval |= 0x01000000;
16373 newval &= ~0x01000000;
16375 md_number_to_chars (buf, newval, INSN_SIZE);
16379 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
16380 /* CBZ/CBNZ can only branch forward. */
16382 as_bad_where (fixP->fx_file, fixP->fx_line,
16383 _("branch out of range"));
16385 if (fixP->fx_done || !seg->use_rela_p)
16387 newval = md_chars_to_number (buf, THUMB_SIZE);
16388 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
16389 md_number_to_chars (buf, newval, THUMB_SIZE);
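/* Added note: the line above places the branch offset into the CBZ/CBNZ
   i:imm5 fields (bit 6 of the offset goes to bit 9, bits 5..1 to bits
   7..3), giving a forward-only reach of 0..126 bytes in steps of 2.  */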
16393 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
16394 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
16395 as_bad_where (fixP->fx_file, fixP->fx_line,
16396 _("branch out of range"));
16398 if (fixP->fx_done || !seg->use_rela_p)
16400 newval = md_chars_to_number (buf, THUMB_SIZE);
16401 newval |= (value & 0x1ff) >> 1;
16402 md_number_to_chars (buf, newval, THUMB_SIZE);
16406 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
16407 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
16408 as_bad_where (fixP->fx_file, fixP->fx_line,
16409 _("branch out of range"));
16411 if (fixP->fx_done || !seg->use_rela_p)
16413 newval = md_chars_to_number (buf, THUMB_SIZE);
16414 newval |= (value & 0xfff) >> 1;
16415 md_number_to_chars (buf, newval, THUMB_SIZE);
16419 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16420 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
16421 as_bad_where (fixP->fx_file, fixP->fx_line,
16422 _("conditional branch out of range"));
16424 if (fixP->fx_done || !seg->use_rela_p)
16427 addressT S, J1, J2, lo, hi;
16429 S = (value & 0x00100000) >> 20;
16430 J2 = (value & 0x00080000) >> 19;
16431 J1 = (value & 0x00040000) >> 18;
16432 hi = (value & 0x0003f000) >> 12;
16433 lo = (value & 0x00000ffe) >> 1;
16435 newval = md_chars_to_number (buf, THUMB_SIZE);
16436 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16437 newval |= (S << 10) | hi;
16438 newval2 |= (J1 << 13) | (J2 << 11) | lo;
16439 md_number_to_chars (buf, newval, THUMB_SIZE);
16440 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16444 case BFD_RELOC_THUMB_PCREL_BLX:
16445 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16446 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
16447 as_bad_where (fixP->fx_file, fixP->fx_line,
16448 _("branch out of range"));
16450 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
16451 /* For a BLX instruction, make sure that the relocation is rounded up
16452 to a word boundary. This follows the semantics of the instruction
16453 which specifies that bit 1 of the target address will come from bit
16454 1 of the base address. */
16455 value = (value + 1) & ~ 1;
16457 if (fixP->fx_done || !seg->use_rela_p)
16461 newval = md_chars_to_number (buf, THUMB_SIZE);
16462 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16463 newval |= (value & 0x7fffff) >> 12;
16464 newval2 |= (value & 0xfff) >> 1;
16465 md_number_to_chars (buf, newval, THUMB_SIZE);
16466 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16470 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16471 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
16472 as_bad_where (fixP->fx_file, fixP->fx_line,
16473 _("branch out of range"));
16475 if (fixP->fx_done || !seg->use_rela_p)
16478 addressT S, I1, I2, lo, hi;
16480 S = (value & 0x01000000) >> 24;
16481 I1 = (value & 0x00800000) >> 23;
16482 I2 = (value & 0x00400000) >> 22;
16483 hi = (value & 0x003ff000) >> 12;
16484 lo = (value & 0x00000ffe) >> 1;
16489 newval = md_chars_to_number (buf, THUMB_SIZE);
16490 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16491 newval |= (S << 10) | hi;
16492 newval2 |= (I1 << 13) | (I2 << 11) | lo;
16493 md_number_to_chars (buf, newval, THUMB_SIZE);
16494 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16499 if (fixP->fx_done || !seg->use_rela_p)
16500 md_number_to_chars (buf, value, 1);
16504 if (fixP->fx_done || !seg->use_rela_p)
16505 md_number_to_chars (buf, value, 2);
16509 case BFD_RELOC_ARM_TLS_GD32:
16510 case BFD_RELOC_ARM_TLS_LE32:
16511 case BFD_RELOC_ARM_TLS_IE32:
16512 case BFD_RELOC_ARM_TLS_LDM32:
16513 case BFD_RELOC_ARM_TLS_LDO32:
16514 S_SET_THREAD_LOCAL (fixP->fx_addsy);
16517 case BFD_RELOC_ARM_GOT32:
16518 case BFD_RELOC_ARM_GOTOFF:
16519 case BFD_RELOC_ARM_TARGET2:
16520 if (fixP->fx_done || !seg->use_rela_p)
16521 md_number_to_chars (buf, 0, 4);
16525 case BFD_RELOC_RVA:
16527 case BFD_RELOC_ARM_TARGET1:
16528 case BFD_RELOC_ARM_ROSEGREL32:
16529 case BFD_RELOC_ARM_SBREL32:
16530 case BFD_RELOC_32_PCREL:
16531 if (fixP->fx_done || !seg->use_rela_p)
16533 /* For WinCE we only do this for pcrel fixups. */
16534 if (fixP->fx_done || fixP->fx_pcrel)
16536 md_number_to_chars (buf, value, 4);
16540 case BFD_RELOC_ARM_PREL31:
16541 if (fixP->fx_done || !seg->use_rela_p)
16543 newval = md_chars_to_number (buf, 4) & 0x80000000;
16544 if ((value ^ (value >> 1)) & 0x40000000)
16546 as_bad_where (fixP->fx_file, fixP->fx_line,
16547 _("rel31 relocation overflow"));
16549 newval |= value & 0x7fffffff;
16550 md_number_to_chars (buf, newval, 4);
16555 case BFD_RELOC_ARM_CP_OFF_IMM:
16556 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
16557 if (value < -1023 || value > 1023 || (value & 3))
16558 as_bad_where (fixP->fx_file, fixP->fx_line,
16559 _("co-processor offset out of range"));
16564 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16565 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16566 newval = md_chars_to_number (buf, INSN_SIZE);
16568 newval = get_thumb32_insn (buf);
16569 newval &= 0xff7fff00;
16570 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
16572 newval &= ~WRITE_BACK;
16573 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16574 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16575 md_number_to_chars (buf, newval, INSN_SIZE);
16577 put_thumb32_insn (buf, newval);
16580 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
16581 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
16582 if (value < -255 || value > 255)
16583 as_bad_where (fixP->fx_file, fixP->fx_line,
16584 _("co-processor offset out of range"));
16586 goto cp_off_common;
16588 case BFD_RELOC_ARM_THUMB_OFFSET:
16589 newval = md_chars_to_number (buf, THUMB_SIZE);
16590 /* Exactly what ranges, and where the offset is inserted, depends on the
16591 type of instruction; we can establish this from the top four bits of the opcode. */
16593 switch (newval >> 12)
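/* Added summary of the narrow encodings handled below: PC- and
   SP-relative loads/stores take a word-scaled 8-bit offset (0..1020);
   the word, halfword and byte forms take a 5-bit offset scaled by 4, 2
   and 1 respectively (0..124, 0..62 and 0..31).  */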
16595 case 4: /* PC load. */
16596 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
16597 forced to zero for these loads; md_pcrel_from has already
16598 compensated for this. */
16600 as_bad_where (fixP->fx_file, fixP->fx_line,
16601 _("invalid offset, target not word aligned (0x%08lX)"),
16602 (((unsigned long) fixP->fx_frag->fr_address
16603 + (unsigned long) fixP->fx_where) & ~3)
16604 + (unsigned long) value);
16606 if (value & ~0x3fc)
16607 as_bad_where (fixP->fx_file, fixP->fx_line,
16608 _("invalid offset, value too big (0x%08lX)"),
16611 newval |= value >> 2;
16614 case 9: /* SP load/store. */
16615 if (value & ~0x3fc)
16616 as_bad_where (fixP->fx_file, fixP->fx_line,
16617 _("invalid offset, value too big (0x%08lX)"),
16619 newval |= value >> 2;
16622 case 6: /* Word load/store. */
16624 as_bad_where (fixP->fx_file, fixP->fx_line,
16625 _("invalid offset, value too big (0x%08lX)"),
16627 newval |= value << 4; /* 6 - 2. */
16630 case 7: /* Byte load/store. */
16632 as_bad_where (fixP->fx_file, fixP->fx_line,
16633 _("invalid offset, value too big (0x%08lX)"),
16635 newval |= value << 6;
16638 case 8: /* Halfword load/store. */
16640 as_bad_where (fixP->fx_file, fixP->fx_line,
16641 _("invalid offset, value too big (0x%08lX)"),
16643 newval |= value << 5; /* 6 - 1. */
16647 as_bad_where (fixP->fx_file, fixP->fx_line,
16648 "Unable to process relocation for thumb opcode: %lx",
16649 (unsigned long) newval);
16652 md_number_to_chars (buf, newval, THUMB_SIZE);
16655 case BFD_RELOC_ARM_THUMB_ADD:
16656 /* This is a complicated relocation, since we use it for all of
16657 the following immediate relocations:
16661 9bit ADD/SUB SP word-aligned
16662 10bit ADD PC/SP word-aligned
16664 The type of instruction being processed is encoded in the
16671 newval = md_chars_to_number (buf, THUMB_SIZE);
16673 int rd = (newval >> 4) & 0xf;
16674 int rs = newval & 0xf;
16675 int subtract = !!(newval & 0x8000);
16677 /* Check for HI regs, only very restricted cases allowed:
16678 Adjusting SP, and using PC or SP to get an address. */
16679 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
16680 || (rs > 7 && rs != REG_SP && rs != REG_PC))
16681 as_bad_where (fixP->fx_file, fixP->fx_line,
16682 _("invalid Hi register with immediate"));
16684 /* If value is negative, choose the opposite instruction. */
16688 subtract = !subtract;
16690 as_bad_where (fixP->fx_file, fixP->fx_line,
16691 _("immediate value out of range"));
16696 if (value & ~0x1fc)
16697 as_bad_where (fixP->fx_file, fixP->fx_line,
16698 _("invalid immediate for stack address calculation"));
16699 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
16700 newval |= value >> 2;
16702 else if (rs == REG_PC || rs == REG_SP)
16704 if (subtract || value & ~0x3fc)
16705 as_bad_where (fixP->fx_file, fixP->fx_line,
16706 _("invalid immediate for address calculation (value = 0x%08lX)"),
16707 (unsigned long) value);
16708 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
16710 newval |= value >> 2;
16715 as_bad_where (fixP->fx_file, fixP->fx_line,
16716 _("immediate value out of range"));
16717 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
16718 newval |= (rd << 8) | value;
16723 as_bad_where (fixP->fx_file, fixP->fx_line,
16724 _("immediate value out of range"));
16725 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
16726 newval |= rd | (rs << 3) | (value << 6);
16729 md_number_to_chars (buf, newval, THUMB_SIZE);
16732 case BFD_RELOC_ARM_THUMB_IMM:
16733 newval = md_chars_to_number (buf, THUMB_SIZE);
16734 if (value < 0 || value > 255)
16735 as_bad_where (fixP->fx_file, fixP->fx_line,
16736 _("invalid immediate: %ld is too large"),
16739 md_number_to_chars (buf, newval, THUMB_SIZE);
16742 case BFD_RELOC_ARM_THUMB_SHIFT:
16743 /* 5bit shift value (0..32). LSL cannot take 32. */
16744 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
16745 temp = newval & 0xf800;
16746 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
16747 as_bad_where (fixP->fx_file, fixP->fx_line,
16748 _("invalid shift value: %ld"), (long) value);
16749 /* Shifts of zero must be encoded as LSL. */
16751 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
16752 /* Shifts of 32 are encoded as zero. */
16753 else if (value == 32)
16755 newval |= value << 6;
16756 md_number_to_chars (buf, newval, THUMB_SIZE);
16759 case BFD_RELOC_VTABLE_INHERIT:
16760 case BFD_RELOC_VTABLE_ENTRY:
16764 case BFD_RELOC_UNUSED:
16766 as_bad_where (fixP->fx_file, fixP->fx_line,
16767 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
16771 /* Translate internal representation of relocation info to BFD target
16775 tc_gen_reloc (asection *section, fixS *fixp)
16778 bfd_reloc_code_real_type code;
16780 reloc = xmalloc (sizeof (arelent));
16782 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
16783 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
16784 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
16786 if (fixp->fx_pcrel)
16788 if (section->use_rela_p)
16789 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
16791 fixp->fx_offset = reloc->address;
16793 reloc->addend = fixp->fx_offset;
16795 switch (fixp->fx_r_type)
16798 if (fixp->fx_pcrel)
16800 code = BFD_RELOC_8_PCREL;
16805 if (fixp->fx_pcrel)
16807 code = BFD_RELOC_16_PCREL;
16812 if (fixp->fx_pcrel)
16814 code = BFD_RELOC_32_PCREL;
16818 case BFD_RELOC_NONE:
16819 case BFD_RELOC_ARM_PCREL_BRANCH:
16820 case BFD_RELOC_ARM_PCREL_BLX:
16821 case BFD_RELOC_RVA:
16822 case BFD_RELOC_THUMB_PCREL_BRANCH7:
16823 case BFD_RELOC_THUMB_PCREL_BRANCH9:
16824 case BFD_RELOC_THUMB_PCREL_BRANCH12:
16825 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16826 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16827 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16828 case BFD_RELOC_THUMB_PCREL_BLX:
16829 case BFD_RELOC_VTABLE_ENTRY:
16830 case BFD_RELOC_VTABLE_INHERIT:
16831 code = fixp->fx_r_type;
16834 case BFD_RELOC_ARM_LITERAL:
16835 case BFD_RELOC_ARM_HWLITERAL:
16836 /* If this is called then a literal has
16837 been referenced across a section boundary. */
16838 as_bad_where (fixp->fx_file, fixp->fx_line,
16839 _("literal referenced across section boundary"));
16843 case BFD_RELOC_ARM_GOT32:
16844 case BFD_RELOC_ARM_GOTOFF:
16845 case BFD_RELOC_ARM_PLT32:
16846 case BFD_RELOC_ARM_TARGET1:
16847 case BFD_RELOC_ARM_ROSEGREL32:
16848 case BFD_RELOC_ARM_SBREL32:
16849 case BFD_RELOC_ARM_PREL31:
16850 case BFD_RELOC_ARM_TARGET2:
16851 case BFD_RELOC_ARM_TLS_LE32:
16852 case BFD_RELOC_ARM_TLS_LDO32:
16853 case BFD_RELOC_ARM_PCREL_CALL:
16854 case BFD_RELOC_ARM_PCREL_JUMP:
16855 code = fixp->fx_r_type;
16858 case BFD_RELOC_ARM_TLS_GD32:
16859 case BFD_RELOC_ARM_TLS_IE32:
16860 case BFD_RELOC_ARM_TLS_LDM32:
16861 /* BFD will include the symbol's address in the addend.
16862 But we don't want that, so subtract it out again here. */
16863 if (!S_IS_COMMON (fixp->fx_addsy))
16864 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
16865 code = fixp->fx_r_type;
16869 case BFD_RELOC_ARM_IMMEDIATE:
16870 as_bad_where (fixp->fx_file, fixp->fx_line,
16871 _("internal relocation (type: IMMEDIATE) not fixed up"));
16874 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
16875 as_bad_where (fixp->fx_file, fixp->fx_line,
16876 _("ADRL used for a symbol not defined in the same file"));
16879 case BFD_RELOC_ARM_OFFSET_IMM:
16880 if (section->use_rela_p)
16882 code = fixp->fx_r_type;
16886 if (fixp->fx_addsy != NULL
16887 && !S_IS_DEFINED (fixp->fx_addsy)
16888 && S_IS_LOCAL (fixp->fx_addsy))
16890 as_bad_where (fixp->fx_file, fixp->fx_line,
16891 _("undefined local label `%s'"),
16892 S_GET_NAME (fixp->fx_addsy));
16896 as_bad_where (fixp->fx_file, fixp->fx_line,
16897 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
16904 switch (fixp->fx_r_type)
16906 case BFD_RELOC_NONE: type = "NONE"; break;
16907 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
16908 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
16909 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
16910 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
16911 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
16912 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
16913 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
16914 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
16915 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
16916 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
16917 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
16918 default: type = _("<unknown>"); break;
16920 as_bad_where (fixp->fx_file, fixp->fx_line,
16921 _("cannot represent %s relocation in this object file format"),
16928 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
16930 && fixp->fx_addsy == GOT_symbol)
16932 code = BFD_RELOC_ARM_GOTPC;
16933 reloc->addend = fixp->fx_offset = reloc->address;
16937 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
16939 if (reloc->howto == NULL)
16941 as_bad_where (fixp->fx_file, fixp->fx_line,
16942 _("cannot represent %s relocation in this object file format"),
16943 bfd_get_reloc_code_name (code));
16947 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
16948 vtable entry to be used in the relocation's section offset. */
16949 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
16950 reloc->address = fixp->fx_offset;
16955 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
16958 cons_fix_new_arm (fragS * frag,
16963 bfd_reloc_code_real_type type;
16967 FIXME: @@ Should look at CPU word size. */
16971 type = BFD_RELOC_8;
16974 type = BFD_RELOC_16;
16978 type = BFD_RELOC_32;
16981 type = BFD_RELOC_64;
16985 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
16988 #if defined OBJ_COFF || defined OBJ_ELF
16990 arm_validate_fix (fixS * fixP)
16992 /* If the destination of the branch is a defined symbol which does not have
16993 the THUMB_FUNC attribute, then we must be calling a function which has
16994 the (interfacearm) attribute. We look for the Thumb entry point to that
16995 function and change the branch to refer to that function instead. */
16996 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
16997 && fixP->fx_addsy != NULL
16998 && S_IS_DEFINED (fixP->fx_addsy)
16999 && ! THUMB_IS_FUNC (fixP->fx_addsy))
17001 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
17007 arm_force_relocation (struct fix * fixp)
17009 #if defined (OBJ_COFF) && defined (TE_PE)
17010 if (fixp->fx_r_type == BFD_RELOC_RVA)
17014 /* Resolve these relocations even if the symbol is extern or weak. */
17015 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
17016 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
17017 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
17018 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
17019 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
17020 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
17023 return generic_force_reloc (fixp);
17028 arm_fix_adjustable (fixS * fixP)
17030 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
17031 local labels from being added to the output symbol table when they
17032 are used with the ADRL pseudo op. The ADRL relocation should always
17033 be resolved before the binary is emitted, so it is safe to say that
17034 it is adjustable. */
17035 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
17038 /* This is a hack for the gas/all/redef2.s test. This test causes symbols
17039 to be cloned, and without this test relocs would still be generated
17040 against the original, pre-cloned symbol. Such symbols would not appear
17041 in the symbol table however, and so a valid reloc could not be
17042 generated. So check to see if the fixup is against a symbol which has
17043 been removed from the symbol chain, and if it is, then allow it to be
17044 adjusted into a reloc against a section symbol. */
17045 if (fixP->fx_addsy != NULL
17046 && ! S_IS_LOCAL (fixP->fx_addsy)
17047 && symbol_next (fixP->fx_addsy) == NULL
17048 && symbol_next (fixP->fx_addsy) == symbol_previous (fixP->fx_addsy))
17056 /* Relocations against Thumb function names must be left unadjusted,
17057 so that the linker can use this information to correctly set the
17058 bottom bit of their addresses. The MIPS version of this function
17059 also prevents relocations that are mips-16 specific, but I do not
17060 know why it does this.
17063 There is one other problem that ought to be addressed here, but
17064 which currently is not: Taking the address of a label (rather
17065 than a function) and then later jumping to that address. Such
17066 addresses also ought to have their bottom bit set (assuming that
17067 they reside in Thumb code), but at the moment they will not. */
17070 arm_fix_adjustable (fixS * fixP)
17072 if (fixP->fx_addsy == NULL)
17075 if (THUMB_IS_FUNC (fixP->fx_addsy)
17076 && fixP->fx_subsy == NULL)
17079 /* We need the symbol name for the VTABLE entries. */
17080 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
17081 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
17084 /* Don't allow symbols to be discarded on GOT related relocs. */
17085 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
17086 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
17087 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
17088 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
17089 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
17090 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
17091 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
17092 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
17093 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
17100 elf32_arm_target_format (void)
17103 return (target_big_endian
17104 ? "elf32-bigarm-symbian"
17105 : "elf32-littlearm-symbian");
17106 #elif defined (TE_VXWORKS)
17107 return (target_big_endian
17108 ? "elf32-bigarm-vxworks"
17109 : "elf32-littlearm-vxworks");
17111 if (target_big_endian)
17112 return "elf32-bigarm";
17114 return "elf32-littlearm";
17119 armelf_frob_symbol (symbolS * symp,
17122 elf_frob_symbol (symp, puntp);
17126 /* MD interface: Finalization. */
17128 /* A good place to do this, although this was probably not intended
17129 for this kind of use. We need to dump the literal pool before
17130 references are made to a null symbol pointer. */
17135 literal_pool * pool;
17137 for (pool = list_of_pools; pool; pool = pool->next)
17139 /* Put it at the end of the relevant section. */
17140 subseg_set (pool->section, pool->sub_section);
17142 arm_elf_change_section ();
17148 /* Adjust the symbol table. This marks Thumb symbols as distinct from
17152 arm_adjust_symtab (void)
17157 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17159 if (ARM_IS_THUMB (sym))
17161 if (THUMB_IS_FUNC (sym))
17163 /* Mark the symbol as a Thumb function. */
17164 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
17165 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
17166 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
17168 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
17169 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
17171 as_bad (_("%s: unexpected function type: %d"),
17172 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
17174 else switch (S_GET_STORAGE_CLASS (sym))
17177 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
17180 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
17183 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
17191 if (ARM_IS_INTERWORK (sym))
17192 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
17199 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17201 if (ARM_IS_THUMB (sym))
17203 elf_symbol_type * elf_sym;
17205 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
17206 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
17208 if (! bfd_is_arm_mapping_symbol_name (elf_sym->symbol.name))
17210 /* If it's a .thumb_func, declare it as so,
17211 otherwise tag label as .code 16. */
17212 if (THUMB_IS_FUNC (sym))
17213 elf_sym->internal_elf_sym.st_info =
17214 ELF_ST_INFO (bind, STT_ARM_TFUNC);
17216 elf_sym->internal_elf_sym.st_info =
17217 ELF_ST_INFO (bind, STT_ARM_16BIT);
17224 /* MD interface: Initialization. */
17227 set_constant_flonums (void)
17231 for (i = 0; i < NUM_FLOAT_VALS; i++)
17232 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
17242 if ( (arm_ops_hsh = hash_new ()) == NULL
17243 || (arm_cond_hsh = hash_new ()) == NULL
17244 || (arm_shift_hsh = hash_new ()) == NULL
17245 || (arm_psr_hsh = hash_new ()) == NULL
17246 || (arm_v7m_psr_hsh = hash_new ()) == NULL
17247 || (arm_reg_hsh = hash_new ()) == NULL
17248 || (arm_reloc_hsh = hash_new ()) == NULL
17249 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
17250 as_fatal (_("virtual memory exhausted"));
17252 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
17253 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
17254 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
17255 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
17256 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
17257 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
17258 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
17259 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
17260 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
17261 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
17262 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
17263 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
17265 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
17267 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
17268 (PTR) (barrier_opt_names + i));
17270 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
17271 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
17274 set_constant_flonums ();
17276 /* Set the cpu variant based on the command-line options. We prefer
17277 -mcpu= over -march= if both are set (as for GCC); and we prefer
17278 -mfpu= over any other way of setting the floating point unit.
17279 Use of legacy options with new options is faulted. */
17282 if (mcpu_cpu_opt || march_cpu_opt)
17283 as_bad (_("use of old and new-style options to set CPU type"));
17285 mcpu_cpu_opt = legacy_cpu;
17287 else if (!mcpu_cpu_opt)
17288 mcpu_cpu_opt = march_cpu_opt;
17293 as_bad (_("use of old and new-style options to set FPU type"));
17295 mfpu_opt = legacy_fpu;
17297 else if (!mfpu_opt)
17299 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17300 /* Some environments specify a default FPU. If they don't, infer it
17301 from the processor. */
17303 mfpu_opt = mcpu_fpu_opt;
17305 mfpu_opt = march_fpu_opt;
17307 mfpu_opt = &fpu_default;
17314 mfpu_opt = &fpu_default;
17315 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
17316 mfpu_opt = &fpu_arch_vfp_v2;
17318 mfpu_opt = &fpu_arch_fpa;
17324 mcpu_cpu_opt = &cpu_default;
17325 selected_cpu = cpu_default;
17329 selected_cpu = *mcpu_cpu_opt;
17331 mcpu_cpu_opt = &arm_arch_any;
17334 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17336 arm_arch_used = thumb_arch_used = arm_arch_none;
17338 #if defined OBJ_COFF || defined OBJ_ELF
17340 unsigned int flags = 0;
17342 #if defined OBJ_ELF
17343 flags = meabi_flags;
17345 switch (meabi_flags)
17347 case EF_ARM_EABI_UNKNOWN:
17349 /* Set the flags in the private structure. */
17350 if (uses_apcs_26) flags |= F_APCS26;
17351 if (support_interwork) flags |= F_INTERWORK;
17352 if (uses_apcs_float) flags |= F_APCS_FLOAT;
17353 if (pic_code) flags |= F_PIC;
17354 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
17355 flags |= F_SOFT_FLOAT;
17357 switch (mfloat_abi_opt)
17359 case ARM_FLOAT_ABI_SOFT:
17360 case ARM_FLOAT_ABI_SOFTFP:
17361 flags |= F_SOFT_FLOAT;
17364 case ARM_FLOAT_ABI_HARD:
17365 if (flags & F_SOFT_FLOAT)
17366 as_bad (_("hard-float conflicts with specified fpu"));
17370 /* Using pure-endian doubles (even if soft-float). */
17371 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
17372 flags |= F_VFP_FLOAT;
17374 #if defined OBJ_ELF
17375 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
17376 flags |= EF_ARM_MAVERICK_FLOAT;
17379 case EF_ARM_EABI_VER4:
17380 case EF_ARM_EABI_VER5:
17381 /* No additional flags to set. */
17388 bfd_set_private_flags (stdoutput, flags);
17390 /* We have run out of flags in the COFF header to encode the
17391 status of ATPCS support, so instead we create a dummy,
17392 empty, debug section called .arm.atpcs. */
17397 sec = bfd_make_section (stdoutput, ".arm.atpcs");
17401 bfd_set_section_flags
17402 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
17403 bfd_set_section_size (stdoutput, sec, 0);
17404 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
17410 /* Record the CPU type as well. */
17411 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
17412 mach = bfd_mach_arm_iWMMXt;
17413 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
17414 mach = bfd_mach_arm_XScale;
17415 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
17416 mach = bfd_mach_arm_ep9312;
17417 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
17418 mach = bfd_mach_arm_5TE;
17419 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
17421 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17422 mach = bfd_mach_arm_5T;
17424 mach = bfd_mach_arm_5;
17426 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
17428 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17429 mach = bfd_mach_arm_4T;
17431 mach = bfd_mach_arm_4;
17433 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
17434 mach = bfd_mach_arm_3M;
17435 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
17436 mach = bfd_mach_arm_3;
17437 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
17438 mach = bfd_mach_arm_2a;
17439 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
17440 mach = bfd_mach_arm_2;
17442 mach = bfd_mach_arm_unknown;
17444 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
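  /* As a rough illustration of the chain above (assuming no coprocessor
     extensions are enabled): a v4T selection such as -mcpu=arm7tdmi yields
     bfd_mach_arm_4T, while a plain v3 selection such as -march=armv3 yields
     bfd_mach_arm_3.  */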
17447 /* Command line processing. */
17450 Invocation line includes a switch not recognized by the base assembler.
17451 See if it's a processor-specific option.
17453 This routine is somewhat complicated by the need for backwards
17454 compatibility (since older releases of gcc can't be changed).
17455 The new options try to make the interface as compatible as
17458 New options (supported) are:
17460 -mcpu=<cpu name> Assemble for selected processor
17461 -march=<architecture name> Assemble for selected architecture
17462 -mfpu=<fpu architecture> Assemble for selected FPU.
17463 -EB/-mbig-endian Big-endian
17464 -EL/-mlittle-endian Little-endian
17465 -k Generate PIC code
17466 -mthumb Start in Thumb mode
17467 -mthumb-interwork Code supports ARM/Thumb interworking
17469 For now we will also provide support for:
17471 -mapcs-32 32-bit Program counter
17472 -mapcs-26 26-bit Program counter
17473 -mapcs-float Floats passed in FP registers
17474 -mapcs-reentrant Reentrant code
17476 (sometime these will probably be replaced with -mapcs=<list of options>
17477 and -matpcs=<list of options>)
17479 The remaining options are only supported for backwards compatibility.
17480 Cpu variants, the arm part is optional:
17481 -m[arm]1 Currently not supported.
17482 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17483 -m[arm]3 Arm 3 processor
17484 -m[arm]6[xx], Arm 6 processors
17485 -m[arm]7[xx][t][[d]m] Arm 7 processors
17486 -m[arm]8[10] Arm 8 processors
17487 -m[arm]9[20][tdmi] Arm 9 processors
17488 -mstrongarm[110[0]] StrongARM processors
17489 -mxscale XScale processors
17490 -m[arm]v[2345[t[e]]] Arm architectures
17491 -mall All (except the ARM1)
17493 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17494 -mfpe-old (No float load/store multiples)
17495 -mvfpxd VFP Single precision
17497 -mno-fpu Disable all floating point instructions
17499 The following CPU names are recognized:
17500 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17501 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17502 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
17503 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17504 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17505 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
17506 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
17510 const char * md_shortopts = "m:k";
17512 #ifdef ARM_BI_ENDIAN
17513 #define OPTION_EB (OPTION_MD_BASE + 0)
17514 #define OPTION_EL (OPTION_MD_BASE + 1)
17516 #if TARGET_BYTES_BIG_ENDIAN
17517 #define OPTION_EB (OPTION_MD_BASE + 0)
17519 #define OPTION_EL (OPTION_MD_BASE + 1)
17523 struct option md_longopts[] =
17526 {"EB", no_argument, NULL, OPTION_EB},
17529 {"EL", no_argument, NULL, OPTION_EL},
17531 {NULL, no_argument, NULL, 0}
17534 size_t md_longopts_size = sizeof (md_longopts);
17536 struct arm_option_table
17538 char *option; /* Option name to match. */
17539 char *help; /* Help information. */
17540 int *var; /* Variable to change. */
17541 int value; /* What to change it to. */
17542 char *deprecated; /* If non-null, print this message. */
17545 struct arm_option_table arm_opts[] =
17547 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
17548 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
17549 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
17550 &support_interwork, 1, NULL},
17551 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
17552 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
17553 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
17555 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
17556 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
17557 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
17558 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
17561 /* These are recognized by the assembler, but have no effect on code. */
17562 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
17563 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
17564 {NULL, NULL, NULL, 0, NULL}
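  /* Sketch of how a simple flag is handled (hypothetical invocations):
     "-mthumb-interwork" sets support_interwork to 1 via the table above,
     and "-mapcs-32" stores 0 into uses_apcs_26.  */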
17567 struct arm_legacy_option_table
17569 char *option; /* Option name to match. */
17570 const arm_feature_set **var; /* Variable to change. */
17571 const arm_feature_set value; /* What to change it to. */
17572 char *deprecated; /* If non-null, print this message. */
17575 const struct arm_legacy_option_table arm_legacy_opts[] =
17577 /* DON'T add any new processors to this list -- we want the whole list
17578 to go away... Add them to the processors table instead. */
17579 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17580 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17581 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17582 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17583 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17584 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17585 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17586 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17587 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17588 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17589 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17590 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17591 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17592 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17593 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17594 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17595 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17596 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17597 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17598 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17599 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17600 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17601 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17602 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17603 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17604 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17605 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17606 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17607 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17608 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17609 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17610 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17611 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17612 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17613 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17614 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17615 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17616 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17617 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17618 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17619 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17620 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17621 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17622 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17623 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17624 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17625 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17626 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17627 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17628 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17629 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17630 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17631 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17632 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17633 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17634 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17635 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17636 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17637 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17638 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17639 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17640 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17641 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17642 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17643 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17644 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17645 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17646 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17647 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
17648 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
17649 N_("use -mcpu=strongarm110")},
17650 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
17651 N_("use -mcpu=strongarm1100")},
17652 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
17653 N_("use -mcpu=strongarm1110")},
17654 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
17655 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
17656 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
17658 /* Architecture variants -- don't add any more to this list either. */
17659 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17660 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17661 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17662 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17663 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17664 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17665 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17666 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17667 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17668 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17669 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17670 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17671 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17672 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17673 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17674 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17675 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17676 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17678 /* Floating point variants -- don't add any more to this list either. */
17679 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
17680 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
17681 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
17682 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
17683 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17685 {NULL, NULL, ARM_ARCH_NONE, NULL}
17688 struct arm_cpu_option_table
17691 const arm_feature_set value;
17692 /* For some CPUs we assume an FPU unless the user explicitly sets
17694 const arm_feature_set default_fpu;
17695 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17697 const char *canonical_name;
17700 /* This list should, at a minimum, contain all the cpu names
17701 recognized by GCC. */
17702 static const struct arm_cpu_option_table arm_cpus[] =
17704 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
17705 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
17706 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
17707 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17708 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17709 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17710 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17711 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17712 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17713 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17714 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17715 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17716 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17717 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17718 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17719 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17720 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17721 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17722 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17723 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17724 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17725 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17726 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17727 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17728 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17729 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17730 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17731 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17732 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17733 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17734 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17735 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17736 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17737 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17738 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17739 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17740 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17741 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17742 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17743 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
17744 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17745 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17746 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17747 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17748 /* For V5 or later processors we default to using VFP; but the user
17749 should really set the FPU type explicitly. */
17750 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17751 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17752 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17753 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17754 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17755 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17756 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
17757 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17758 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17759 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
17760 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17761 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17762 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17763 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17764 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17765 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
17766 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17767 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17768 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17769 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
17770 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17771 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
17772 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
17773 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
17774 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
17775 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
17776 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
17777 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
17778 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
17779 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
17780 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
17781 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
17782 | FPU_NEON_EXT_V1),
17784 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
17785 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
17786 /* ??? XSCALE is really an architecture. */
17787 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17788 /* ??? iwmmxt is not a processor. */
17789 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
17790 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17792 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
17793 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
17796 struct arm_arch_option_table
17799 const arm_feature_set value;
17800 const arm_feature_set default_fpu;
17803 /* This list should, at a minimum, contain all the architecture names
17804 recognized by GCC. */
17805 static const struct arm_arch_option_table arm_archs[] =
17807 {"all", ARM_ANY, FPU_ARCH_FPA},
17808 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
17809 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
17810 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
17811 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
17812 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
17813 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
17814 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
17815 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
17816 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
17817 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
17818 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
17819 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
17820 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
17821 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
17822 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
17823 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
17824 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
17825 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
17826 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
17827 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
17828 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
17829 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
17830 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
17831 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
17832 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
17833 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
17834 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
17835 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
17836 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
17837 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
17838 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
17839 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
17842 /* ISA extensions in the co-processor space. */
17843 struct arm_option_cpu_value_table
17846 const arm_feature_set value;
17849 static const struct arm_option_cpu_value_table arm_extensions[] =
17851 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
17852 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
17853 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
17854 {NULL, ARM_ARCH_NONE}
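  /* These are merged onto a base cpu or architecture by arm_parse_extension()
     below; for example (illustrative) "-mcpu=xscale+iwmmxt" enables the
     iWMMXt coprocessor opcodes on top of the XScale base.  */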
17857 /* This list should, at a minimum, contain all the fpu names
17858 recognized by GCC. */
17859 static const struct arm_option_cpu_value_table arm_fpus[] =
17861 {"softfpa", FPU_NONE},
17862 {"fpe", FPU_ARCH_FPE},
17863 {"fpe2", FPU_ARCH_FPE},
17864 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
17865 {"fpa", FPU_ARCH_FPA},
17866 {"fpa10", FPU_ARCH_FPA},
17867 {"fpa11", FPU_ARCH_FPA},
17868 {"arm7500fe", FPU_ARCH_FPA},
17869 {"softvfp", FPU_ARCH_VFP},
17870 {"softvfp+vfp", FPU_ARCH_VFP_V2},
17871 {"vfp", FPU_ARCH_VFP_V2},
17872 {"vfp9", FPU_ARCH_VFP_V2},
17873 {"vfp3", FPU_ARCH_VFP_V3},
17874 {"vfp10", FPU_ARCH_VFP_V2},
17875 {"vfp10-r0", FPU_ARCH_VFP_V1},
17876 {"vfpxd", FPU_ARCH_VFP_V1xD},
17877 {"arm1020t", FPU_ARCH_VFP_V1},
17878 {"arm1020e", FPU_ARCH_VFP_V2},
17879 {"arm1136jfs", FPU_ARCH_VFP_V2},
17880 {"arm1136jf-s", FPU_ARCH_VFP_V2},
17881 {"maverick", FPU_ARCH_MAVERICK},
17882 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
17883 {NULL, ARM_ARCH_NONE}
17886 struct arm_option_value_table
17892 static const struct arm_option_value_table arm_float_abis[] =
17894 {"hard", ARM_FLOAT_ABI_HARD},
17895 {"softfp", ARM_FLOAT_ABI_SOFTFP},
17896 {"soft", ARM_FLOAT_ABI_SOFT},
17901 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
17902 static const struct arm_option_value_table arm_eabis[] =
17904 {"gnu", EF_ARM_EABI_UNKNOWN},
17905 {"4", EF_ARM_EABI_VER4},
17906 {"5", EF_ARM_EABI_VER5},
17911 struct arm_long_option_table
17913 char * option; /* Substring to match. */
17914 char * help; /* Help information. */
17915 int (* func) (char * subopt); /* Function to decode sub-option. */
17916 char * deprecated; /* If non-null, print this message. */
17920 arm_parse_extension (char * str, const arm_feature_set **opt_p)
17922 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
17924 /* Copy the feature set, so that we can modify it. */
17925 *ext_set = **opt_p;
17928 while (str != NULL && *str != 0)
17930 const struct arm_option_cpu_value_table * opt;
17936 as_bad (_("invalid architectural extension"));
17941 ext = strchr (str, '+');
17944 optlen = ext - str;
17946 optlen = strlen (str);
17950 as_bad (_("missing architectural extension"));
17954 for (opt = arm_extensions; opt->name != NULL; opt++)
17955 if (strncmp (opt->name, str, optlen) == 0)
17957 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
17961 if (opt->name == NULL)
17963 as_bad (_("unknown architectural extension `%s'"), str);
17974 arm_parse_cpu (char * str)
17976 const struct arm_cpu_option_table * opt;
17977 char * ext = strchr (str, '+');
17981 optlen = ext - str;
17983 optlen = strlen (str);
17987 as_bad (_("missing cpu name `%s'"), str);
17991 for (opt = arm_cpus; opt->name != NULL; opt++)
17992 if (strncmp (opt->name, str, optlen) == 0)
17994 mcpu_cpu_opt = &opt->value;
17995 mcpu_fpu_opt = &opt->default_fpu;
17996 if (opt->canonical_name)
17997 strcpy(selected_cpu_name, opt->canonical_name);
18001 for (i = 0; i < optlen; i++)
18002 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18003 selected_cpu_name[i] = 0;
18007 return arm_parse_extension (ext, &mcpu_cpu_opt);
18012 as_bad (_("unknown cpu `%s'"), str);
18017 arm_parse_arch (char * str)
18019 const struct arm_arch_option_table *opt;
18020 char *ext = strchr (str, '+');
18024 optlen = ext - str;
18026 optlen = strlen (str);
18030 as_bad (_("missing architecture name `%s'"), str);
18034 for (opt = arm_archs; opt->name != NULL; opt++)
18035 if (streq (opt->name, str))
18037 march_cpu_opt = &opt->value;
18038 march_fpu_opt = &opt->default_fpu;
18039 strcpy(selected_cpu_name, opt->name);
18042 return arm_parse_extension (ext, &march_cpu_opt);
18047 as_bad (_("unknown architecture `%s'\n"), str);
18052 arm_parse_fpu (char * str)
18054 const struct arm_option_cpu_value_table * opt;
18056 for (opt = arm_fpus; opt->name != NULL; opt++)
18057 if (streq (opt->name, str))
18059 mfpu_opt = &opt->value;
18063 as_bad (_("unknown floating point format `%s'\n"), str);
18068 arm_parse_float_abi (char * str)
18070 const struct arm_option_value_table * opt;
18072 for (opt = arm_float_abis; opt->name != NULL; opt++)
18073 if (streq (opt->name, str))
18075 mfloat_abi_opt = opt->value;
18079 as_bad (_("unknown floating point abi `%s'\n"), str);
18085 arm_parse_eabi (char * str)
18087 const struct arm_option_value_table *opt;
18089 for (opt = arm_eabis; opt->name != NULL; opt++)
18090 if (streq (opt->name, str))
18092 meabi_flags = opt->value;
18095 as_bad (_("unknown EABI `%s'\n"), str);
18100 struct arm_long_option_table arm_long_opts[] =
18102 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
18103 arm_parse_cpu, NULL},
18104 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
18105 arm_parse_arch, NULL},
18106 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
18107 arm_parse_fpu, NULL},
18108 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
18109 arm_parse_float_abi, NULL},
18111 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
18112 arm_parse_eabi, NULL},
18114 {NULL, NULL, 0, NULL}
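  /* Sketch of the matching done in md_parse_option() below (hypothetical
     invocation): for "-mfloat-abi=softfp", c is 'm' and arg is
     "float-abi=softfp"; the "mfloat-abi=" entry matches by prefix and
     arm_parse_float_abi() is handed the remainder, "softfp".  */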
18118 md_parse_option (int c, char * arg)
18120 struct arm_option_table *opt;
18121 const struct arm_legacy_option_table *fopt;
18122 struct arm_long_option_table *lopt;
18128 target_big_endian = 1;
18134 target_big_endian = 0;
18139 /* Listing option. Just ignore these, we don't support additional
18144 for (opt = arm_opts; opt->option != NULL; opt++)
18146 if (c == opt->option[0]
18147 && ((arg == NULL && opt->option[1] == 0)
18148 || streq (arg, opt->option + 1)))
18150 #if WARN_DEPRECATED
18151 /* If the option is deprecated, tell the user. */
18152 if (opt->deprecated != NULL)
18153 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18154 arg ? arg : "", _(opt->deprecated));
18157 if (opt->var != NULL)
18158 *opt->var = opt->value;
18164 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
18166 if (c == fopt->option[0]
18167 && ((arg == NULL && fopt->option[1] == 0)
18168 || streq (arg, fopt->option + 1)))
18170 #if WARN_DEPRECATED
18171 /* If the option is deprecated, tell the user. */
18172 if (fopt->deprecated != NULL)
18173 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18174 arg ? arg : "", _(fopt->deprecated));
18177 if (fopt->var != NULL)
18178 *fopt->var = &fopt->value;
18184 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18186 /* These options are expected to have an argument. */
18187 if (c == lopt->option[0]
18189 && strncmp (arg, lopt->option + 1,
18190 strlen (lopt->option + 1)) == 0)
18192 #if WARN_DEPRECATED
18193 /* If the option is deprecated, tell the user. */
18194 if (lopt->deprecated != NULL)
18195 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
18196 _(lopt->deprecated));
18199 /* Call the sub-option parser. */
18200 return lopt->func (arg + strlen (lopt->option) - 1);
18211 md_show_usage (FILE * fp)
18213 struct arm_option_table *opt;
18214 struct arm_long_option_table *lopt;
18216 fprintf (fp, _(" ARM-specific assembler options:\n"));
18218 for (opt = arm_opts; opt->option != NULL; opt++)
18219 if (opt->help != NULL)
18220 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
18222 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18223 if (lopt->help != NULL)
18224 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
18228 -EB assemble code for a big-endian cpu\n"));
18233 -EL assemble code for a little-endian cpu\n"));
18242 arm_feature_set flags;
18243 } cpu_arch_ver_table;
18245 /* Mapping from CPU features to EABI CPU arch values. The table must be
18246 sorted so that the entries with the fewest features come first. */
18247 static const cpu_arch_ver_table cpu_arch_ver[] =
18252 {4, ARM_ARCH_V5TE},
18253 {5, ARM_ARCH_V5TEJ},
18257 {9, ARM_ARCH_V6T2},
18258 {10, ARM_ARCH_V7A},
18259 {10, ARM_ARCH_V7R},
18260 {10, ARM_ARCH_V7M},
18264 /* Set the public EABI object attributes. */
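/* A hedged worked example (hypothetical input): for a file assembled with
   -mcpu=arm926ej-s that uses only ARM instructions, Tag_CPU_name (5) becomes
   "ARM926EJ-S", Tag_CPU_arch (6) resolves to 5 (v5TEJ) via the cpu_arch_ver
   scan, Tag_ARM_ISA_use (8) is 1, and Tag_VFP_arch (10) is emitted as 2 only
   if VFP instructions were actually assembled.  */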
18266 aeabi_set_public_attributes (void)
18269 arm_feature_set flags;
18270 arm_feature_set tmp;
18271 const cpu_arch_ver_table *p;
18273 /* Choose the architecture based on the capabilities of the requested cpu
18274 (if any) and/or the instructions actually used. */
18275 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
18276 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
18277 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
18281 for (p = cpu_arch_ver; p->val; p++)
18283 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
18286 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
18290 /* Tag_CPU_name. */
18291 if (selected_cpu_name[0])
18295 p = selected_cpu_name;
18296 if (strncmp(p, "armv", 4) == 0)
18301 for (i = 0; p[i]; i++)
18302 p[i] = TOUPPER (p[i]);
18304 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
18306 /* Tag_CPU_arch. */
18307 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
18308 /* Tag_CPU_arch_profile. */
18309 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
18310 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
18311 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
18312 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
18313 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
18314 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
18315 /* Tag_ARM_ISA_use. */
18316 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
18317 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
18318 /* Tag_THUMB_ISA_use. */
18319 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
18320 elf32_arm_add_eabi_attr_int (stdoutput, 9,
18321 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
18322 /* Tag_VFP_arch. */
18323 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
18324 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
18325 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
18326 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
18327 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
18328 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
18329 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
18330 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
18331 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
18332 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
18333 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
18334 /* Tag_WMMX_arch. */
18335 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
18336 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
18337 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
18338 /* Tag_NEON_arch. */
18339 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
18340 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
18341 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
18344 /* Add the .ARM.attributes section. */
18353 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
18356 aeabi_set_public_attributes ();
18357 size = elf32_arm_eabi_attr_size (stdoutput);
18358 s = subseg_new (".ARM.attributes", 0);
18359 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
18360 addr = frag_now_fix ();
18361 p = frag_more (size);
18362 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
18364 #endif /* OBJ_ELF */
18367 /* Parse a .cpu directive. */
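/* For instance (illustrative): ".cpu arm920t" selects the arm920t entry from
   arm_cpus and re-derives cpu_variant from it.  */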
18370 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
18372 const struct arm_cpu_option_table *opt;
18376 name = input_line_pointer;
18377 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18378 input_line_pointer++;
18379 saved_char = *input_line_pointer;
18380 *input_line_pointer = 0;
18382 /* Skip the first "all" entry. */
18383 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
18384 if (streq (opt->name, name))
18386 mcpu_cpu_opt = &opt->value;
18387 selected_cpu = opt->value;
18388 if (opt->canonical_name)
18389 strcpy(selected_cpu_name, opt->canonical_name);
18393 for (i = 0; opt->name[i]; i++)
18394 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18395 selected_cpu_name[i] = 0;
18397 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18398 *input_line_pointer = saved_char;
18399 demand_empty_rest_of_line ();
18402 as_bad (_("unknown cpu `%s'"), name);
18403 *input_line_pointer = saved_char;
18404 ignore_rest_of_line ();
18408 /* Parse a .arch directive. */
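/* For instance (illustrative): ".arch armv5te" selects the armv5te entry from
   arm_archs; unlike -march=, the table's default FPU is not applied here.  */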
18411 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
18413 const struct arm_arch_option_table *opt;
18417 name = input_line_pointer;
18418 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18419 input_line_pointer++;
18420 saved_char = *input_line_pointer;
18421 *input_line_pointer = 0;
18423 /* Skip the first "all" entry. */
18424 for (opt = arm_archs + 1; opt->name != NULL; opt++)
18425 if (streq (opt->name, name))
18427 mcpu_cpu_opt = &opt->value;
18428 selected_cpu = opt->value;
18429 strcpy(selected_cpu_name, opt->name);
18430 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18431 *input_line_pointer = saved_char;
18432 demand_empty_rest_of_line ();
18436 as_bad (_("unknown architecture `%s'\n"), name);
18437 *input_line_pointer = saved_char;
18438 ignore_rest_of_line ();
18442 /* Parse a .fpu directive. */
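/* For instance (illustrative): ".fpu vfp" or ".fpu softvfp+vfp" switches the
   accepted floating point instruction set mid-file by re-merging cpu_variant
   with the newly selected FPU feature set.  */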
18445 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
18447 const struct arm_option_cpu_value_table *opt;
18451 name = input_line_pointer;
18452 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18453 input_line_pointer++;
18454 saved_char = *input_line_pointer;
18455 *input_line_pointer = 0;
18457 for (opt = arm_fpus; opt->name != NULL; opt++)
18458 if (streq (opt->name, name))
18460 mfpu_opt = &opt->value;
18461 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18462 *input_line_pointer = saved_char;
18463 demand_empty_rest_of_line ();
18467 as_bad (_("unknown floating point format `%s'\n"), name);
18468 *input_line_pointer = saved_char;
18469 ignore_rest_of_line ();