1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2014 Free Software Foundation, Inc.
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
If you have a target that requires a default CPU option then you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
199 static const arm_feature_set arm_ext_m =
200 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
201 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
202 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
203 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
204 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
205 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
207 static const arm_feature_set arm_arch_any = ARM_ANY;
208 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
209 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
210 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
211 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
/* Coprocessor extension feature sets (iWMMXt, XScale, Maverick).  */
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
/* Floating-point extension feature sets: legacy FPA, then the VFP
   versions (the xD variants are the single-precision-only subsets).  */
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
/* D32: the 32-double-register VFP register file extension.  */
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE (0, FPU_VFP_EXT_D32);
/* Neon (Advanced SIMD) and combined VFPv3-or-Neon gating sets.  */
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
/* ARMv8 FP/SIMD, crypto and CRC extensions.  */
static const arm_feature_set fpu_vfp_ext_armv8 =
  ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_armv8 =
  ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
  ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
static const arm_feature_set crc_ext_armv8 =
  ARM_FEATURE (0, CRC_EXT_ARMV8);
246 static int mfloat_abi_opt = -1;
247 /* Record user cpu selection for object attributes. */
248 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
249 /* Must be long enough to hold any of the names in arm_cpus. */
250 static char selected_cpu_name[16];
252 /* Return if no cpu was selected on command-line. */
254 no_cpu_selected (void)
256 return selected_cpu.core == arm_arch_none.core
257 && selected_cpu.coproc == arm_arch_none.coproc;
262 static int meabi_flags = EABI_DEFAULT;
264 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
267 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
272 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
277 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
278 symbolS * GOT_symbol;
281 /* 0: assemble for ARM,
282 1: assemble for Thumb,
283 2: assemble for Thumb even though target CPU does not support thumb
285 static int thumb_mode = 0;
286 /* A value distinct from the possible values for thumb_mode that we
287 can use to record whether thumb_mode has been copied into the
288 tc_frag_data field of a frag. */
289 #define MODE_RECORDED (1 << 4)
291 /* Specifies the intrinsic IT insn behavior mode. */
292 enum implicit_it_mode
294 IMPLICIT_IT_MODE_NEVER = 0x00,
295 IMPLICIT_IT_MODE_ARM = 0x01,
296 IMPLICIT_IT_MODE_THUMB = 0x02,
297 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
299 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
301 /* If unified_syntax is true, we are processing the new unified
302 ARM/Thumb syntax. Important differences from the old ARM mode:
304 - Immediate operands do not require a # prefix.
305 - Conditional affixes always appear at the end of the
306 instruction. (For backward compatibility, those instructions
307 that formerly had them in the middle, continue to accept them
309 - The IT instruction may appear, and if it does is validated
310 against subsequent conditional affixes. It does not generate
313 Important differences from the old Thumb mode:
315 - Immediate operands do not require a # prefix.
316 - Most of the V6T2 instructions are only available in unified mode.
317 - The .N and .W suffixes are recognized and honored (it is an error
318 if they cannot be honored).
319 - All instructions set the flags if and only if they have an 's' affix.
320 - Conditional affixes may be used. They are validated against
321 preceding IT instructions. Unlike ARM mode, you cannot use a
322 conditional affix except in the scope of an IT instruction. */
324 static bfd_boolean unified_syntax = FALSE;
326 /* An immediate operand can start with #, and ld*, st*, pld operands
327 can contain [ and ]. We need to tell APP not to elide whitespace
328 before a [, which can appear as the first operand for pld.
329 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
330 const char arm_symbol_chars[] = "#[]{}";
345 enum neon_el_type type;
349 #define NEON_MAX_TYPE_ELS 4
353 struct neon_type_el el[NEON_MAX_TYPE_ELS];
357 enum it_instruction_type
362 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
363 if inside, should be the last one. */
364 NEUTRAL_IT_INSN, /* This could be either inside or outside,
365 i.e. BKPT and NOP. */
366 IT_INSN /* The IT insn has been parsed. */
369 /* The maximum number of operands we need. */
370 #define ARM_IT_MAX_OPERANDS 6
375 unsigned long instruction;
379 /* "uncond_value" is set to the value in place of the conditional field in
380 unconditional versions of the instruction, or -1 if nothing is
383 struct neon_type vectype;
384 /* This does not indicate an actual NEON instruction, only that
385 the mnemonic accepts neon-style type suffixes. */
387 /* Set to the opcode if the instruction needs relaxation.
388 Zero if the instruction is not relaxed. */
392 bfd_reloc_code_real_type type;
397 enum it_instruction_type it_insn_type;
403 struct neon_type_el vectype;
404 unsigned present : 1; /* Operand present. */
405 unsigned isreg : 1; /* Operand was a register. */
406 unsigned immisreg : 1; /* .imm field is a second register. */
407 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
408 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
409 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
410 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
411 instructions. This allows us to disambiguate ARM <-> vector insns. */
412 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
413 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
414 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
415 unsigned issingle : 1; /* Operand is VFP single-precision register. */
416 unsigned hasreloc : 1; /* Operand has relocation suffix. */
417 unsigned writeback : 1; /* Operand has trailing ! */
418 unsigned preind : 1; /* Preindexed address. */
419 unsigned postind : 1; /* Postindexed address. */
420 unsigned negative : 1; /* Index register was negated. */
421 unsigned shifted : 1; /* Shift applied to operation. */
422 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
423 } operands[ARM_IT_MAX_OPERANDS];
426 static struct arm_it inst;
428 #define NUM_FLOAT_VALS 8
430 const char * fp_const[] =
432 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
435 /* Number of littlenums required to hold an extended precision number. */
436 #define MAX_LITTLENUMS 6
438 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
448 #define CP_T_X 0x00008000
449 #define CP_T_Y 0x00400000
451 #define CONDS_BIT 0x00100000
452 #define LOAD_BIT 0x00100000
454 #define DOUBLE_LOAD_FLAG 0x00000001
458 const char * template_name;
462 #define COND_ALWAYS 0xE
466 const char * template_name;
470 struct asm_barrier_opt
472 const char * template_name;
474 const arm_feature_set arch;
477 /* The bit that distinguishes CPSR and SPSR. */
478 #define SPSR_BIT (1 << 22)
480 /* The individual PSR flag bits. */
481 #define PSR_c (1 << 16)
482 #define PSR_x (1 << 17)
483 #define PSR_s (1 << 18)
484 #define PSR_f (1 << 19)
489 bfd_reloc_code_real_type reloc;
494 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
495 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
500 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
503 /* Bits for DEFINED field in neon_typed_alias. */
504 #define NTA_HASTYPE 1
505 #define NTA_HASINDEX 2
507 struct neon_typed_alias
509 unsigned char defined;
511 struct neon_type_el eltype;
514 /* ARM register categories. This includes coprocessor numbers and various
515 architecture extensions' registers. */
542 /* Structure for a hash table entry for a register.
543 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
544 information which states whether a vector type or index is specified (for a
545 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
551 unsigned char builtin;
552 struct neon_typed_alias * neon;
555 /* Diagnostics used when we don't get a register of the expected type. */
556 const char * const reg_expected_msgs[] =
558 N_("ARM register expected"),
559 N_("bad or missing co-processor number"),
560 N_("co-processor register expected"),
561 N_("FPA register expected"),
562 N_("VFP single precision register expected"),
563 N_("VFP/Neon double precision register expected"),
564 N_("Neon quad precision register expected"),
565 N_("VFP single or double precision register expected"),
566 N_("Neon double or quad precision register expected"),
567 N_("VFP single, double or Neon quad precision register expected"),
568 N_("VFP system register expected"),
569 N_("Maverick MVF register expected"),
570 N_("Maverick MVD register expected"),
571 N_("Maverick MVFX register expected"),
572 N_("Maverick MVDX register expected"),
573 N_("Maverick MVAX register expected"),
574 N_("Maverick DSPSC register expected"),
575 N_("iWMMXt data register expected"),
576 N_("iWMMXt control register expected"),
577 N_("iWMMXt scalar register expected"),
578 N_("XScale accumulator register expected"),
581 /* Some well known registers that we refer to directly elsewhere. */
587 /* ARM instructions take 4bytes in the object file, Thumb instructions
593 /* Basic string to match. */
594 const char * template_name;
596 /* Parameters to instruction. */
597 unsigned int operands[8];
599 /* Conditional tag - see opcode_lookup. */
600 unsigned int tag : 4;
602 /* Basic instruction code. */
603 unsigned int avalue : 28;
605 /* Thumb-format instruction code. */
608 /* Which architecture variant provides this instruction. */
609 const arm_feature_set * avariant;
610 const arm_feature_set * tvariant;
612 /* Function to call to encode instruction in ARM format. */
613 void (* aencode) (void);
615 /* Function to call to encode instruction in Thumb format. */
616 void (* tencode) (void);
/* Defines for various bits that we will want to toggle. */
/* Bit numbers below follow from the mask values; semantic names are the
   conventional A32 encoding field names.  */
#define INST_IMMEDIATE 0x02000000 /* Bit 25: operand 2 is an immediate (I bit).  */
#define OFFSET_REG 0x02000000 /* Bit 25: load/store offset is a register.  */
#define HWOFFSET_IMM 0x00400000 /* Bit 22: halfword/signed-byte immediate offset.  */
#define SHIFT_BY_REG 0x00000010 /* Bit 4: shift amount held in a register.  */
#define PRE_INDEX 0x01000000 /* Bit 24: pre-indexed addressing (P bit).  */
#define INDEX_UP 0x00800000 /* Bit 23: offset is added, not subtracted (U bit).  */
#define WRITE_BACK 0x00200000 /* Bit 21: write address back to base (W bit).  */
#define LDM_TYPE_2_OR_3 0x00400000 /* Bit 22: LDM/STM type 2/3 ('^') forms.  */
#define CPSI_MMOD 0x00020000 /* Bit 17: CPS mode-change ('mmod') flag.  */
/* Masks used when matching/relaxing whole instructions; exact use sites
   are outside this view — confirm against the encoders.  */
#define LITERAL_MASK 0xf000f000
#define OPCODE_MASK 0xfe1fffff
#define V4_STR_BIT 0x00000020
#define VLDR_VMOV_SAME 0x0040f000
635 #define T2_SUBS_PC_LR 0xf3de8f00
637 #define DATA_OP_SHIFT 21
639 #define T2_OPCODE_MASK 0xfe1fffff
640 #define T2_DATA_OP_SHIFT 21
#define A_COND_MASK 0xf0000000 /* Condition field of an A32 instruction.  */
#define A_PUSH_POP_OP_MASK 0x0fff0000 /* Opcode + base-register field, used to
					 recognise push/pop forms.  */

/* Opcodes for pushing/popping registers to/from the stack. */
#define A1_OPCODE_PUSH 0x092d0000 /* PUSH, A1 encoding (STMDB sp!).  */
#define A2_OPCODE_PUSH 0x052d0004 /* PUSH, A2 single-register encoding.  */
#define A2_OPCODE_POP 0x049d0004 /* POP, A2 single-register encoding.  */
650 /* Codes to distinguish the arithmetic instructions. */
661 #define OPCODE_CMP 10
662 #define OPCODE_CMN 11
663 #define OPCODE_ORR 12
664 #define OPCODE_MOV 13
665 #define OPCODE_BIC 14
666 #define OPCODE_MVN 15
668 #define T2_OPCODE_AND 0
669 #define T2_OPCODE_BIC 1
670 #define T2_OPCODE_ORR 2
671 #define T2_OPCODE_ORN 3
672 #define T2_OPCODE_EOR 4
673 #define T2_OPCODE_ADD 8
674 #define T2_OPCODE_ADC 10
675 #define T2_OPCODE_SBC 11
676 #define T2_OPCODE_SUB 13
677 #define T2_OPCODE_RSB 14
679 #define T_OPCODE_MUL 0x4340
680 #define T_OPCODE_TST 0x4200
681 #define T_OPCODE_CMN 0x42c0
682 #define T_OPCODE_NEG 0x4240
683 #define T_OPCODE_MVN 0x43c0
685 #define T_OPCODE_ADD_R3 0x1800
686 #define T_OPCODE_SUB_R3 0x1a00
687 #define T_OPCODE_ADD_HI 0x4400
688 #define T_OPCODE_ADD_ST 0xb000
689 #define T_OPCODE_SUB_ST 0xb080
690 #define T_OPCODE_ADD_SP 0xa800
691 #define T_OPCODE_ADD_PC 0xa000
692 #define T_OPCODE_ADD_I8 0x3000
693 #define T_OPCODE_SUB_I8 0x3800
694 #define T_OPCODE_ADD_I3 0x1c00
695 #define T_OPCODE_SUB_I3 0x1e00
697 #define T_OPCODE_ASR_R 0x4100
698 #define T_OPCODE_LSL_R 0x4080
699 #define T_OPCODE_LSR_R 0x40c0
700 #define T_OPCODE_ROR_R 0x41c0
701 #define T_OPCODE_ASR_I 0x1000
702 #define T_OPCODE_LSL_I 0x0000
703 #define T_OPCODE_LSR_I 0x0800
705 #define T_OPCODE_MOV_I8 0x2000
706 #define T_OPCODE_CMP_I8 0x2800
707 #define T_OPCODE_CMP_LR 0x4280
708 #define T_OPCODE_MOV_HR 0x4600
709 #define T_OPCODE_CMP_HR 0x4500
711 #define T_OPCODE_LDR_PC 0x4800
712 #define T_OPCODE_LDR_SP 0x9800
713 #define T_OPCODE_STR_SP 0x9000
714 #define T_OPCODE_LDR_IW 0x6800
715 #define T_OPCODE_STR_IW 0x6000
716 #define T_OPCODE_LDR_IH 0x8800
717 #define T_OPCODE_STR_IH 0x8000
718 #define T_OPCODE_LDR_IB 0x7800
719 #define T_OPCODE_STR_IB 0x7000
720 #define T_OPCODE_LDR_RW 0x5800
721 #define T_OPCODE_STR_RW 0x5000
722 #define T_OPCODE_LDR_RH 0x5a00
723 #define T_OPCODE_STR_RH 0x5200
724 #define T_OPCODE_LDR_RB 0x5c00
725 #define T_OPCODE_STR_RB 0x5400
727 #define T_OPCODE_PUSH 0xb400
728 #define T_OPCODE_POP 0xbc00
730 #define T_OPCODE_BRANCH 0xe000
732 #define THUMB_SIZE 2 /* Size of thumb instruction. */
733 #define THUMB_PP_PC_LR 0x0100
734 #define THUMB_LOAD_BIT 0x0800
735 #define THUMB2_LOAD_BIT 0x00100000
/* Diagnostic strings shared by the ARM and Thumb encoders.  These are
   assigned to inst.error and friends, so each must expand to a plain
   expression with no trailing punctuation.  */
#define BAD_ARGS _("bad arguments to instruction")
#define BAD_SP _("r13 not allowed here")
#define BAD_PC _("r15 not allowed here")
#define BAD_COND _("instruction cannot be conditional")
#define BAD_OVERLAP _("registers may not be the same")
#define BAD_HIREG _("lo register required")
#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* Fixed: removed a stray trailing ';' that every sibling macro lacks;
   it would break any expression-context use of this macro (e.g. in a
   conditional operator or as a function argument).  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
#define BAD_BRANCH _("branch must be last instruction in IT block")
#define BAD_NOT_IT _("instruction not allowed in IT block")
#define BAD_FPU _("selected FPU does not support instruction")
#define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
#define BAD_IT_COND _("incorrect condition in IT block")
#define BAD_IT_IT _("IT falling in the range of a previous IT block")
#define MISSING_FNSTART _("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE _("branch out of range")
#define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
759 static struct hash_control * arm_ops_hsh;
760 static struct hash_control * arm_cond_hsh;
761 static struct hash_control * arm_shift_hsh;
762 static struct hash_control * arm_psr_hsh;
763 static struct hash_control * arm_v7m_psr_hsh;
764 static struct hash_control * arm_reg_hsh;
765 static struct hash_control * arm_reloc_hsh;
766 static struct hash_control * arm_barrier_opt_hsh;
768 /* Stuff needed to resolve the label ambiguity
777 symbolS * last_label_seen;
778 static int label_is_thumb_function_name = FALSE;
780 /* Literal pool structure. Held on a per-section
781 and per-sub-section basis. */
783 #define MAX_LITERAL_POOL_SIZE 1024
784 typedef struct literal_pool
786 expressionS literals [MAX_LITERAL_POOL_SIZE];
787 unsigned int next_free_entry;
793 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
795 struct literal_pool * next;
796 unsigned int alignment;
799 /* Pointer to a linked list of literal pools. */
800 literal_pool * list_of_pools = NULL;
802 typedef enum asmfunc_states
805 WAITING_ASMFUNC_NAME,
809 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
812 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
814 static struct current_it now_it;
818 now_it_compatible (int cond)
820 return (cond & ~1) == (now_it.cc & ~1);
824 conditional_insn (void)
826 return inst.cond != COND_ALWAYS;
829 static int in_it_block (void);
831 static int handle_it_state (void);
833 static void force_automatic_it_block_close (void);
835 static void it_fsm_post_encode (void);
837 #define set_it_insn_type(type) \
840 inst.it_insn_type = type; \
841 if (handle_it_state () == FAIL) \
846 #define set_it_insn_type_nonvoid(type, failret) \
849 inst.it_insn_type = type; \
850 if (handle_it_state () == FAIL) \
855 #define set_it_insn_type_last() \
858 if (inst.cond == COND_ALWAYS) \
859 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
861 set_it_insn_type (INSIDE_IT_LAST_INSN); \
867 /* This array holds the chars that always start a comment. If the
868 pre-processor is disabled, these aren't very useful. */
869 char arm_comment_chars[] = "@";
871 /* This array holds the chars that only start a comment at the beginning of
872 a line. If the line seems to have the form '# 123 filename'
873 .line and .file directives will appear in the pre-processed output. */
874 /* Note that input_file.c hand checks for '#' at the beginning of the
875 first line of the input file. This is because the compiler outputs
876 #NO_APP at the beginning of its output. */
877 /* Also note that comments like this one will always work. */
878 const char line_comment_chars[] = "#";
880 char arm_line_separator_chars[] = ";";
882 /* Chars that can be used to separate mant
883 from exp in floating point numbers. */
884 const char EXP_CHARS[] = "eE";
886 /* Chars that mean this number is a floating point constant. */
890 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
892 /* Prefix characters that indicate the start of an immediate
894 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
896 /* Separator character handling. */
898 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
901 skip_past_char (char ** str, char c)
903 /* PR gas/14987: Allow for whitespace before the expected character. */
904 skip_whitespace (*str);
915 #define skip_past_comma(str) skip_past_char (str, ',')
917 /* Arithmetic expressions (possibly involving symbols). */
919 /* Return TRUE if anything in the expression is a bignum. */
922 walk_no_bignums (symbolS * sp)
924 if (symbol_get_value_expression (sp)->X_op == O_big)
927 if (symbol_get_value_expression (sp)->X_add_symbol)
929 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
930 || (symbol_get_value_expression (sp)->X_op_symbol
931 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
937 static int in_my_get_expression = 0;
939 /* Third argument to my_get_expression. */
940 #define GE_NO_PREFIX 0
941 #define GE_IMM_PREFIX 1
942 #define GE_OPT_PREFIX 2
943 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
944 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
945 #define GE_OPT_PREFIX_BIG 3
948 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
953 /* In unified syntax, all prefixes are optional. */
955 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
960 case GE_NO_PREFIX: break;
962 if (!is_immediate_prefix (**str))
964 inst.error = _("immediate expression requires a # prefix");
970 case GE_OPT_PREFIX_BIG:
971 if (is_immediate_prefix (**str))
977 memset (ep, 0, sizeof (expressionS));
979 save_in = input_line_pointer;
980 input_line_pointer = *str;
981 in_my_get_expression = 1;
982 seg = expression (ep);
983 in_my_get_expression = 0;
985 if (ep->X_op == O_illegal || ep->X_op == O_absent)
987 /* We found a bad or missing expression in md_operand(). */
988 *str = input_line_pointer;
989 input_line_pointer = save_in;
990 if (inst.error == NULL)
991 inst.error = (ep->X_op == O_absent
992 ? _("missing expression") :_("bad expression"));
997 if (seg != absolute_section
998 && seg != text_section
999 && seg != data_section
1000 && seg != bss_section
1001 && seg != undefined_section)
1003 inst.error = _("bad segment");
1004 *str = input_line_pointer;
1005 input_line_pointer = save_in;
1012 /* Get rid of any bignums now, so that we don't generate an error for which
1013 we can't establish a line number later on. Big numbers are never valid
1014 in instructions, which is where this routine is always called. */
1015 if (prefix_mode != GE_OPT_PREFIX_BIG
1016 && (ep->X_op == O_big
1017 || (ep->X_add_symbol
1018 && (walk_no_bignums (ep->X_add_symbol)
1020 && walk_no_bignums (ep->X_op_symbol))))))
1022 inst.error = _("invalid constant");
1023 *str = input_line_pointer;
1024 input_line_pointer = save_in;
1028 *str = input_line_pointer;
1029 input_line_pointer = save_in;
1033 /* Turn a string in input_line_pointer into a floating point constant
1034 of type TYPE, and store the appropriate bytes in *LITP. The number
1035 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1036 returned, or NULL on OK.
Note that fp constants aren't represented in the normal way on the ARM.
1039 In big endian mode, things are as expected. However, in little endian
1040 mode fp constants are big-endian word-wise, and little-endian byte-wise
1041 within the words. For example, (double) 1.1 in big endian mode is
1042 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1043 the byte sequence 99 99 f1 3f 9a 99 99 99.
1045 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1048 md_atof (int type, char * litP, int * sizeP)
1051 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1083 return _("Unrecognized or unsupported floating point constant");
1086 t = atof_ieee (input_line_pointer, type, words);
1088 input_line_pointer = t;
1089 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1091 if (target_big_endian)
1093 for (i = 0; i < prec; i++)
1095 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1096 litP += sizeof (LITTLENUM_TYPE);
1101 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1102 for (i = prec - 1; i >= 0; i--)
1104 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1105 litP += sizeof (LITTLENUM_TYPE);
1108 /* For a 4 byte float the order of elements in `words' is 1 0.
1109 For an 8 byte float the order is 1 0 3 2. */
1110 for (i = 0; i < prec; i += 2)
1112 md_number_to_chars (litP, (valueT) words[i + 1],
1113 sizeof (LITTLENUM_TYPE));
1114 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1115 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1116 litP += 2 * sizeof (LITTLENUM_TYPE);
1123 /* We handle all bad expressions here, so that we can report the faulty
1124 instruction in the error message. */
1126 md_operand (expressionS * exp)
1128 if (in_my_get_expression)
1129 exp->X_op = O_illegal;
1132 /* Immediate values. */
1134 /* Generic immediate-value read function for use in directives.
1135 Accepts anything that 'expression' can fold to a constant.
1136 *val receives the number. */
1139 immediate_for_directive (int *val)
1142 exp.X_op = O_illegal;
1144 if (is_immediate_prefix (*input_line_pointer))
1146 input_line_pointer++;
1150 if (exp.X_op != O_constant)
1152 as_bad (_("expected #constant"));
1153 ignore_rest_of_line ();
1156 *val = exp.X_add_number;
1161 /* Register parsing. */
1163 /* Generic register parser. CCP points to what should be the
1164 beginning of a register name. If it is indeed a valid register
1165 name, advance CCP over it and return the reg_entry structure;
1166 otherwise return NULL. Does not issue diagnostics. */
1168 static struct reg_entry *
1169 arm_reg_parse_multi (char **ccp)
1173 struct reg_entry *reg;
1175 skip_whitespace (start);
1177 #ifdef REGISTER_PREFIX
1178 if (*start != REGISTER_PREFIX)
1182 #ifdef OPTIONAL_REGISTER_PREFIX
1183 if (*start == OPTIONAL_REGISTER_PREFIX)
1188 if (!ISALPHA (*p) || !is_name_beginner (*p))
1193 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1195 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1205 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1206 enum arm_reg_type type)
1208 /* Alternative syntaxes are accepted for a few register classes. */
1215 /* Generic coprocessor register names are allowed for these. */
1216 if (reg && reg->type == REG_TYPE_CN)
1221 /* For backward compatibility, a bare number is valid here. */
/* Only coprocessor numbers 0-15 exist, hence the upper bound.  */
1223 unsigned long processor = strtoul (start, ccp, 10);
1224 if (*ccp != start && processor <= 15)
1228 case REG_TYPE_MMXWC:
1229 /* WC includes WCG. ??? I'm not sure this is true for all
1230 instructions that take WC registers. */
1231 if (reg && reg->type == REG_TYPE_MMXWCG)
1242 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1243 return value is the register number or FAIL. */
1246 arm_reg_parse (char **ccp, enum arm_reg_type type)
1249 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1252 /* Do not allow a scalar (reg+index) to parse as a register. */
1253 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
/* An exact type match succeeds directly; otherwise fall back to the
   alternative syntaxes accepted for some register classes.  */
1256 if (reg && reg->type == type)
1259 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1266 /* Parse a Neon type specifier. *STR should point at the leading '.'
1267 character. Does no verification at this stage that the type fits the opcode
1274 Can all be legally parsed by this function.
1276 Fills in neon_type struct pointer with parsed information, and updates STR
1277 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1278 type, FAIL if not. */
1281 parse_neon_type (struct neon_type *type, char **str)
/* Accumulate up to NEON_MAX_TYPE_ELS element descriptions.  */
1288 while (type->elems < NEON_MAX_TYPE_ELS)
1290 enum neon_el_type thistype = NT_untyped;
1291 unsigned thissize = -1u;
1298 /* Just a size without an explicit type. */
/* Single-letter type codes, case-insensitive.  */
1302 switch (TOLOWER (*ptr))
1304 case 'i': thistype = NT_integer; break;
1305 case 'f': thistype = NT_float; break;
1306 case 'p': thistype = NT_poly; break;
1307 case 's': thistype = NT_signed; break;
1308 case 'u': thistype = NT_unsigned; break;
1310 thistype = NT_float;
1315 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1321 /* .f is an abbreviation for .f32. */
1322 if (thistype == NT_float && !ISDIGIT (*ptr))
1327 thissize = strtoul (ptr, &ptr, 10);
/* Only the element sizes the architecture knows are accepted.  */
1329 if (thissize != 8 && thissize != 16 && thissize != 32
1332 as_bad (_("bad size %d in type specifier"), thissize);
1340 type->el[type->elems].type = thistype;
1341 type->el[type->elems].size = thissize;
1346 /* Empty/missing type is not a successful parse. */
1347 if (type->elems == 0)
1355 /* Errors may be set multiple times during parsing or bit encoding
1356 (particularly in the Neon bits), but usually the earliest error which is set
1357 will be the most meaningful. Avoid overwriting it with later (cascading)
1358 errors by calling this function. */
/* Record ERR only if no earlier error has already been recorded.  */
1361 first_error (const char *err)
1367 /* Parse a single type, e.g. ".s32", leading period included. */
1369 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1372 struct neon_type optype;
1376 if (parse_neon_type (&optype, &str) == SUCCESS)
/* Exactly one element type may be given for a single operand.  */
1378 if (optype.elems == 1)
1379 *vectype = optype.el[0];
1382 first_error (_("only one type should be specified for operand"));
1388 first_error (_("vector type expected"));
1400 /* Special meanings for indices (which have a range of 0-7), which will fit into
1403 #define NEON_ALL_LANES 15
1404 #define NEON_INTERLEAVE_LANES 14
1406 /* Parse either a register or a scalar, with an optional type. Return the
1407 register number, and optionally fill in the actual type of the register
1408 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1409 type/index information in *TYPEINFO. */
1412 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1413 enum arm_reg_type *rtype,
1414 struct neon_typed_alias *typeinfo)
1417 struct reg_entry *reg = arm_reg_parse_multi (&str);
1418 struct neon_typed_alias atype;
1419 struct neon_type_el parsetype;
/* Start with no type or index information recorded.  */
1423 atype.eltype.type = NT_invtype;
1424 atype.eltype.size = -1;
1426 /* Try alternate syntax for some types of register. Note these are mutually
1427 exclusive with the Neon syntax extensions. */
1430 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1438 /* Undo polymorphism when a set of register types may be accepted. */
1439 if ((type == REG_TYPE_NDQ
1440 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1441 || (type == REG_TYPE_VFSD
1442 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1443 || (type == REG_TYPE_NSDQ
1444 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1445 || reg->type == REG_TYPE_NQ))
1446 || (type == REG_TYPE_MMXWC
1447 && (reg->type == REG_TYPE_MMXWCG)))
1448 type = (enum arm_reg_type) reg->type;
1450 if (type != reg->type)
/* Parse an optional ".type" suffix, e.g. ".s32".  */
1456 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1458 if ((atype.defined & NTA_HASTYPE) != 0)
1460 first_error (_("can't redefine type for operand"));
1463 atype.defined |= NTA_HASTYPE;
1464 atype.eltype = parsetype;
/* Parse an optional scalar index: "[n]", or "[]" for all lanes.  */
1467 if (skip_past_char (&str, '[') == SUCCESS)
1469 if (type != REG_TYPE_VFD)
1471 first_error (_("only D registers may be indexed"));
1475 if ((atype.defined & NTA_HASINDEX) != 0)
1477 first_error (_("can't change index for operand"));
1481 atype.defined |= NTA_HASINDEX;
/* An empty "[]" selects all lanes.  */
1483 if (skip_past_char (&str, ']') == SUCCESS)
1484 atype.index = NEON_ALL_LANES;
/* Otherwise the index must be a constant expression.  */
1489 my_get_expression (&exp, &str, GE_NO_PREFIX);
1491 if (exp.X_op != O_constant)
1493 first_error (_("constant expression required"));
1497 if (skip_past_char (&str, ']') == FAIL)
1500 atype.index = exp.X_add_number;
1515 /* Like arm_reg_parse, but allow the following extra features:
1516 - If RTYPE is non-zero, return the (possibly restricted) type of the
1517 register (e.g. Neon double or quad reg when either has been requested).
1518 - If this is a Neon vector type with additional type information, fill
1519 in the struct pointed to by VECTYPE (if non-NULL).
1520 This function will fault on encountering a scalar. */
1523 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1524 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1526 struct neon_typed_alias atype;
1528 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1533 /* Do not allow regname(... to parse as a register. */
1537 /* Do not allow a scalar (reg+index) to parse as a register. */
1538 if ((atype.defined & NTA_HASINDEX) != 0)
1540 first_error (_("register operand expected, but got scalar"));
/* Propagate any parsed element type to the caller.  */
1545 *vectype = atype.eltype;
1552 #define NEON_SCALAR_REG(X) ((X) >> 4)
1553 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1555 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1556 have enough information to be able to do a good job bounds-checking. So, we
1557 just do easy checks here, and do further checks later. */
1560 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1564 struct neon_typed_alias atype;
1566 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
/* A scalar must be a D register with an index attached.  */
1568 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1571 if (atype.index == NEON_ALL_LANES)
1573 first_error (_("scalar must have an index"));
/* A 64-bit D register holds 64/ELSIZE elements, bounding the index.  */
1576 else if (atype.index >= 64 / elsize)
1578 first_error (_("scalar index out of range"));
1583 *type = atype.eltype;
/* Encode as reg*16+index; see NEON_SCALAR_REG / NEON_SCALAR_INDEX.  */
1587 return reg * 16 + atype.index;
1590 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1593 parse_reg_list (char ** strp)
1595 char * str = * strp;
1599 /* We come back here if we get ranges concatenated by '+' or '|'. */
1602 skip_whitespace (str);
1616 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1618 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1628 first_error (_("bad range in register list"));
/* Fill in all registers covered by an Rn-Rm range, warning about
   any that are already in the mask.  */
1632 for (i = cur_reg + 1; i < reg; i++)
1634 if (range & (1 << i))
1636 (_("Warning: duplicated register (r%d) in register list"),
1644 if (range & (1 << reg))
1645 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1647 else if (reg <= cur_reg)
1648 as_tsktsk (_("Warning: register range not in ascending order"));
1653 while (skip_past_comma (&str) != FAIL
1654 || (in_range = 1, *str++ == '-'));
1657 if (skip_past_char (&str, '}') == FAIL)
1659 first_error (_("missing `}'"));
/* Without braces, a constant expression is accepted as a literal
   register mask.  */
1667 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1670 if (exp.X_op == O_constant)
/* Only r0-r15 exist, so the mask must fit in 16 bits.  */
1672 if (exp.X_add_number
1673 != (exp.X_add_number & 0x0000ffff))
1675 inst.error = _("invalid register mask");
1679 if ((range & exp.X_add_number) != 0)
1681 int regno = range & exp.X_add_number;
1684 regno = (1 << regno) - 1;
1686 (_("Warning: duplicated register (r%d) in register list"),
1690 range |= exp.X_add_number;
/* A non-constant expression becomes a BFD_RELOC_ARM_MULTI reloc.  */
1694 if (inst.reloc.type != 0)
1696 inst.error = _("expression too complex");
1700 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1701 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1702 inst.reloc.pc_rel = 0;
/* '|' or '+' concatenates another range onto this one.  */
1706 if (*str == '|' || *str == '+')
1712 while (another_range);
1718 /* Types of registers in a list. */
1727 /* Parse a VFP register list. If the string is invalid return FAIL.
1728 Otherwise return the number of registers, and set PBASE to the first
1729 register. Parses registers of type ETYPE.
1730 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1731 - Q registers can be used to specify pairs of D registers
1732 - { } can be omitted from around a singleton register list
1733 FIXME: This is not implemented, as it would require backtracking in
1736 This could be done (the meaning isn't really ambiguous), but doesn't
1737 fit in well with the current parsing framework.
1738 - 32 D registers may be used (also true for VFPv3).
1739 FIXME: Types are ignored in these register lists, which is probably a
1743 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1748 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1752 unsigned long mask = 0;
1755 if (skip_past_char (&str, '{') == FAIL)
1757 inst.error = _("expecting {");
/* Map the requested list kind onto the register type to parse.  */
1764 regtype = REG_TYPE_VFS;
1769 regtype = REG_TYPE_VFD;
1772 case REGLIST_NEON_D:
1773 regtype = REG_TYPE_NDQ;
1777 if (etype != REGLIST_VFP_S)
1779 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1780 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1784 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1787 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1794 base_reg = max_regs;
1798 int setmask = 1, addregs = 1;
/* NOTE(review): "®type" below is an encoding artifact of "&regtype"
   (mangled "&reg") -- restore the original byte sequence on merge.  */
1800 new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL);
1802 if (new_base == FAIL)
1804 first_error (_(reg_expected_msgs[regtype]));
1808 if (new_base >= max_regs)
1810 first_error (_("register out of range in list"));
1814 /* Note: a value of 2 * n is returned for the register Q<n>. */
1815 if (regtype == REG_TYPE_NQ)
/* Track the lowest register seen; it becomes *PBASE.  */
1821 if (new_base < base_reg)
1822 base_reg = new_base;
1824 if (mask & (setmask << new_base))
1826 first_error (_("invalid register list"));
1830 if ((mask >> new_base) != 0 && ! warned)
1832 as_tsktsk (_("register list not in ascending order"));
1836 mask |= setmask << new_base;
1839 if (*str == '-') /* We have the start of a range expression */
1845 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1848 inst.error = gettext (reg_expected_msgs[regtype]);
1852 if (high_range >= max_regs)
1854 first_error (_("register out of range in list"));
1858 if (regtype == REG_TYPE_NQ)
1859 high_range = high_range + 1;
1861 if (high_range <= new_base)
1863 inst.error = _("register range not in ascending order");
/* Fill in every register covered by the range.  */
1867 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1869 if (mask & (setmask << new_base))
1871 inst.error = _("invalid register list");
1875 mask |= setmask << new_base;
1880 while (skip_past_comma (&str) != FAIL);
1884 /* Sanity check -- should have raised a parse error above. */
1885 if (count == 0 || count > max_regs)
1890 /* Final test -- the registers must be consecutive. */
1892 for (i = 0; i < count; i++)
1894 if ((mask & (1u << i)) == 0)
1896 inst.error = _("non-contiguous register range");
1906 /* True if two alias types are the same. */
1909 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
/* The same set of fields must be defined in each alias...  */
1917 if (a->defined != b->defined)
/* ...any defined type must match in both kind and size...  */
1920 if ((a->defined & NTA_HASTYPE) != 0
1921 && (a->eltype.type != b->eltype.type
1922 || a->eltype.size != b->eltype.size))
/* ...and any defined index must match too.  */
1925 if ((a->defined & NTA_HASINDEX) != 0
1926 && (a->index != b->index))
1932 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1933 The base register is put in *PBASE.
1934 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1936 The register stride (minus one) is put in bit 4 of the return value.
1937 Bits [6:5] encode the list length (minus one).
1938 The type of the list elements is put in *ELTYPE, if non-NULL. */
1940 #define NEON_LANE(X) ((X) & 0xf)
1941 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1942 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1945 parse_neon_el_struct_list (char **str, unsigned *pbase,
1946 struct neon_type_el *eltype)
1953 int leading_brace = 0;
1954 enum arm_reg_type rtype = REG_TYPE_NDQ;
1955 const char *const incr_error = _("register stride must be 1 or 2");
1956 const char *const type_error = _("mismatched element/structure types in list");
1957 struct neon_typed_alias firsttype;
/* The surrounding braces are optional for a single register.  */
1959 if (skip_past_char (&ptr, '{') == SUCCESS)
1964 struct neon_typed_alias atype;
1965 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1969 first_error (_(reg_expected_msgs[rtype]));
1976 if (rtype == REG_TYPE_NQ)
/* Derive the register stride from the first two registers seen,
   then require every later register to follow it.  */
1982 else if (reg_incr == -1)
1984 reg_incr = getreg - base_reg;
1985 if (reg_incr < 1 || reg_incr > 2)
1987 first_error (_(incr_error));
1991 else if (getreg != base_reg + reg_incr * count)
1993 first_error (_(incr_error));
1997 if (! neon_alias_types_same (&atype, &firsttype))
1999 first_error (_(type_error));
2003 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2007 struct neon_typed_alias htype;
2008 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
2010 lane = NEON_INTERLEAVE_LANES;
2011 else if (lane != NEON_INTERLEAVE_LANES)
2013 first_error (_(type_error));
2018 else if (reg_incr != 1)
2020 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2024 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2027 first_error (_(reg_expected_msgs[rtype]));
2030 if (! neon_alias_types_same (&htype, &firsttype))
2032 first_error (_(type_error));
2035 count += hireg + dregs - getreg;
2039 /* If we're using Q registers, we can't use [] or [n] syntax. */
2040 if (rtype == REG_TYPE_NQ)
/* Every entry in the list must carry the same lane index (or the
   same "all lanes"/interleave marker).  */
2046 if ((atype.defined & NTA_HASINDEX) != 0)
2050 else if (lane != atype.index)
2052 first_error (_(type_error));
2056 else if (lane == -1)
2057 lane = NEON_INTERLEAVE_LANES;
2058 else if (lane != NEON_INTERLEAVE_LANES)
2060 first_error (_(type_error));
2065 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2067 /* No lane set by [x]. We must be interleaving structures. */
2069 lane = NEON_INTERLEAVE_LANES;
/* Final sanity checks on what the loop accumulated.  */
2072 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2073 || (count > 1 && reg_incr == -1))
2075 first_error (_("error parsing element/structure list"));
2079 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2081 first_error (_("expected }"));
2089 *eltype = firsttype.eltype;
/* Pack lane, stride and length as documented above.  */
2094 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2097 /* Parse an explicit relocation suffix on an expression. This is
2098 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2099 arm_reloc_hsh contains no entries, so this function can only
2100 succeed if there is no () after the word. Returns -1 on error,
2101 BFD_RELOC_UNUSED if there wasn't any suffix. */
2104 parse_reloc (char **str)
2106 struct reloc_entry *r;
/* No parenthesis means there is no explicit relocation suffix.  */
2110 return BFD_RELOC_UNUSED;
/* Scan the word up to the closing ')' or a ','.  */
2115 while (*q && *q != ')' && *q != ',')
/* Look the word up in the relocation-name hash table.  */
2120 if ((r = (struct reloc_entry *)
2121 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2128 /* Directives: register aliases. */
2130 static struct reg_entry *
2131 insert_reg_alias (char *str, unsigned number, int type)
2133 struct reg_entry *new_reg;
/* Warn about (but do not fail hard on) redefinitions.  */
2136 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2138 if (new_reg->builtin)
2139 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2141 /* Only warn about a redefinition if it's not defined as the
2143 else if (new_reg->number != number || new_reg->type != type)
2144 as_warn (_("ignoring redefinition of register alias '%s'"), str);
/* Allocate and populate a fresh entry, then add it to the hash.  */
2149 name = xstrdup (str);
2150 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2152 new_reg->name = name;
2153 new_reg->number = number;
2154 new_reg->type = type;
2155 new_reg->builtin = FALSE;
2156 new_reg->neon = NULL;
2158 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2165 insert_neon_reg_alias (char *str, int number, int type,
2166 struct neon_typed_alias *atype)
2168 struct reg_entry *reg = insert_reg_alias (str, number, type);
2172 first_error (_("attempt to redefine typed alias"));
/* Attach a heap copy of the type/index information, if given.  */
2178 reg->neon = (struct neon_typed_alias *)
2179 xmalloc (sizeof (struct neon_typed_alias));
2180 *reg->neon = *atype;
2184 /* Look for the .req directive. This is of the form:
2186 new_register_name .req existing_register_name
2188 If we find one, or if it looks sufficiently like one that we want to
2189 handle any error here, return TRUE. Otherwise return FALSE. */
2192 create_register_alias (char * newname, char *p)
2194 struct reg_entry *old;
2195 char *oldname, *nbuf;
2198 /* The input scrubber ensures that whitespace after the mnemonic is
2199 collapsed to single spaces. */
2201 if (strncmp (oldname, " .req ", 6) != 0)
2205 if (*oldname == '\0')
/* The right-hand side must name an existing register or alias.  */
2208 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2211 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2215 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2216 the desired alias name, and p points to its end. If not, then
2217 the desired alias name is in the global original_case_string. */
2218 #ifdef TC_CASE_SENSITIVE
2221 newname = original_case_string;
2222 nlen = strlen (newname);
/* NOTE(review): alloca'd buffer -- lifetime ends with this call.  */
2225 nbuf = (char *) alloca (nlen + 1);
2226 memcpy (nbuf, newname, nlen);
2229 /* Create aliases under the new name as stated; an all-lowercase
2230 version of the new name; and an all-uppercase version of the new
2232 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2234 for (p = nbuf; *p; p++)
2237 if (strncmp (nbuf, newname, nlen))
2239 /* If this attempt to create an additional alias fails, do not bother
2240 trying to create the all-lower case alias. We will fail and issue
2241 a second, duplicate error message. This situation arises when the
2242 programmer does something like:
2245 The second .req creates the "Foo" alias but then fails to create
2246 the artificial FOO alias because it has already been created by the
2248 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2252 for (p = nbuf; *p; p++)
2255 if (strncmp (nbuf, newname, nlen))
2256 insert_reg_alias (nbuf, old->number, old->type);
2262 /* Create a Neon typed/indexed register alias using directives, e.g.:
2267 These typed registers can be used instead of the types specified after the
2268 Neon mnemonic, so long as all operands given have types. Types can also be
2269 specified directly, e.g.:
2270 vadd d0.s32, d1.s32, d2.s32 */
2273 create_neon_reg_alias (char *newname, char *p)
2275 enum arm_reg_type basetype;
2276 struct reg_entry *basereg;
2277 struct reg_entry mybasereg;
2278 struct neon_type ntype;
2279 struct neon_typed_alias typeinfo;
2280 char *namebuf, *nameend ATTRIBUTE_UNUSED;
/* Start with no type/index information recorded.  */
2283 typeinfo.defined = 0;
2284 typeinfo.eltype.type = NT_invtype;
2285 typeinfo.eltype.size = -1;
2286 typeinfo.index = -1;
/* ".dn" defines a typed D-register alias, ".qn" a Q-register one.  */
2290 if (strncmp (p, " .dn ", 5) == 0)
2291 basetype = REG_TYPE_VFD;
2292 else if (strncmp (p, " .qn ", 5) == 0)
2293 basetype = REG_TYPE_NQ;
2302 basereg = arm_reg_parse_multi (&p);
2304 if (basereg && basereg->type != basetype)
2306 as_bad (_("bad type for register"));
2310 if (basereg == NULL)
2313 /* Try parsing as an integer. */
2314 my_get_expression (&exp, &p, GE_NO_PREFIX);
2315 if (exp.X_op != O_constant)
2317 as_bad (_("expression must be constant"));
/* Build a fake reg_entry on the stack for the bare number; Q<n>
   is represented internally as D-register 2*n.  */
2320 basereg = &mybasereg;
2321 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
/* Inherit type/index info already attached to the base register.  */
2327 typeinfo = *basereg->neon;
2329 if (parse_neon_type (&ntype, &p) == SUCCESS)
2331 /* We got a type. */
2332 if (typeinfo.defined & NTA_HASTYPE)
2334 as_bad (_("can't redefine the type of a register alias"));
2338 typeinfo.defined |= NTA_HASTYPE;
2339 if (ntype.elems != 1)
2341 as_bad (_("you must specify a single type only"));
2344 typeinfo.eltype = ntype.el[0];
2347 if (skip_past_char (&p, '[') == SUCCESS)
2350 /* We got a scalar index. */
2352 if (typeinfo.defined & NTA_HASINDEX)
2354 as_bad (_("can't redefine the index of a scalar alias"));
2358 my_get_expression (&exp, &p, GE_NO_PREFIX);
2360 if (exp.X_op != O_constant)
2362 as_bad (_("scalar index must be constant"));
2366 typeinfo.defined |= NTA_HASINDEX;
2367 typeinfo.index = exp.X_add_number;
2369 if (skip_past_char (&p, ']') == FAIL)
2371 as_bad (_("expecting ]"));
2376 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2377 the desired alias name, and p points to its end. If not, then
2378 the desired alias name is in the global original_case_string. */
2379 #ifdef TC_CASE_SENSITIVE
2380 namelen = nameend - newname;
2382 newname = original_case_string;
2383 namelen = strlen (newname);
/* NOTE(review): alloca'd buffer -- lifetime ends with this call.  */
2386 namebuf = (char *) alloca (namelen + 1);
2387 strncpy (namebuf, newname, namelen);
2388 namebuf[namelen] = '\0';
/* Insert the alias as written, then (if different) in all-uppercase
   and all-lowercase forms, mirroring create_register_alias.  */
2390 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2391 typeinfo.defined != 0 ? &typeinfo : NULL);
2393 /* Insert name in all uppercase. */
2394 for (p = namebuf; *p; p++)
2397 if (strncmp (namebuf, newname, namelen))
2398 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2399 typeinfo.defined != 0 ? &typeinfo : NULL);
2401 /* Insert name in all lowercase. */
2402 for (p = namebuf; *p; p++)
2405 if (strncmp (namebuf, newname, namelen))
2406 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2407 typeinfo.defined != 0 ? &typeinfo : NULL);
2412 /* Should never be called, as .req goes between the alias and the
2413 register name, not at the beginning of the line. */
2416 s_req (int a ATTRIBUTE_UNUSED)
2418 as_bad (_("invalid syntax for .req directive"));
/* As s_req, but for a misplaced .dn directive.  */
2422 s_dn (int a ATTRIBUTE_UNUSED)
2424 as_bad (_("invalid syntax for .dn directive"));
/* As s_req, but for a misplaced .qn directive.  */
2428 s_qn (int a ATTRIBUTE_UNUSED)
2430 as_bad (_("invalid syntax for .qn directive"));
2433 /* The .unreq directive deletes an alias which was previously defined
2434 by .req. For example:
2440 s_unreq (int a ATTRIBUTE_UNUSED)
/* Isolate the alias name by temporarily NUL-terminating it.  */
2445 name = input_line_pointer;
2447 while (*input_line_pointer != 0
2448 && *input_line_pointer != ' '
2449 && *input_line_pointer != '\n')
2450 ++input_line_pointer;
2452 saved_char = *input_line_pointer;
2453 *input_line_pointer = 0;
2456 as_bad (_("invalid syntax for .unreq directive"));
2459 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2463 as_bad (_("unknown register alias '%s'"), name);
2464 else if (reg->builtin)
2465 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
/* Remove the alias exactly as written.  */
2472 hash_delete (arm_reg_hsh, name, FALSE);
2473 free ((char *) reg->name);
2478 /* Also locate the all upper case and all lower case versions.
2479 Do not complain if we cannot find one or the other as it
2480 was probably deleted above. */
2482 nbuf = strdup (name);
2483 for (p = nbuf; *p; p++)
2485 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2488 hash_delete (arm_reg_hsh, nbuf, FALSE);
2489 free ((char *) reg->name);
2495 for (p = nbuf; *p; p++)
2497 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2500 hash_delete (arm_reg_hsh, nbuf, FALSE);
2501 free ((char *) reg->name);
/* Restore the clobbered input byte before finishing the line.  */
2511 *input_line_pointer = saved_char;
2512 demand_empty_rest_of_line ();
2515 /* Directives: Instruction set selection. */
2518 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2519 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2520 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2521 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2523 /* Create a new mapping symbol for the transition to STATE. */
2526 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2529 const char * symname;
/* All three mapping symbol kinds ($a/$t/$d) are untyped.  */
2536 type = BSF_NO_FLAGS;
2540 type = BSF_NO_FLAGS;
2544 type = BSF_NO_FLAGS;
2550 symbolP = symbol_new (symname, now_seg, value, frag);
2551 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
/* $a (ARM) mapping symbols are not Thumb...  */
2556 THUMB_SET_FUNC (symbolP, 0);
2557 ARM_SET_THUMB (symbolP, 0);
2558 ARM_SET_INTERWORK (symbolP, support_interwork);
/* ...whereas $t (Thumb) ones are.  */
2562 THUMB_SET_FUNC (symbolP, 1);
2563 ARM_SET_THUMB (symbolP, 1);
2564 ARM_SET_INTERWORK (symbolP, support_interwork);
2572 /* Save the mapping symbols for future reference. Also check that
2573 we do not place two mapping symbols at the same offset within a
2574 frag. We'll handle overlap between frags in
2575 check_mapping_symbols.
2577 If .fill or other data filling directive generates zero sized data,
2578 the mapping symbol for the following code will have the same value
2579 as the one generated for the data filling directive. In this case,
2580 we replace the old symbol with the new one at the same address. */
2583 if (frag->tc_frag_data.first_map != NULL)
2585 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2586 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2588 frag->tc_frag_data.first_map = symbolP;
2590 if (frag->tc_frag_data.last_map != NULL)
2592 know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2593 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2594 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2596 frag->tc_frag_data.last_map = symbolP;
2599 /* We must sometimes convert a region marked as code to data during
2600 code alignment, if an odd number of bytes have to be padded. The
2601 code mapping symbol is pushed to an aligned address. */
2604 insert_data_mapping_symbol (enum mstate state,
2605 valueT value, fragS *frag, offsetT bytes)
2607 /* If there was already a mapping symbol, remove it. */
2608 if (frag->tc_frag_data.last_map != NULL
2609 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2611 symbolS *symp = frag->tc_frag_data.last_map;
/* If it was also the first symbol in the frag, clear that too.  */
2615 know (frag->tc_frag_data.first_map == symp);
2616 frag->tc_frag_data.first_map = NULL;
2618 frag->tc_frag_data.last_map = NULL;
2619 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
/* Mark the padding as data, then restore STATE after it.  */
2622 make_mapping_symbol (MAP_DATA, value, frag);
2623 make_mapping_symbol (state, value + bytes, frag);
2626 static void mapping_state_2 (enum mstate state, int max_chars);
2628 /* Set the mapping state to STATE. Only call this when about to
2629 emit some STATE bytes to the file. */
2632 mapping_state (enum mstate state)
2634 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2636 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2638 if (mapstate == state)
2639 /* The mapping symbol has already been emitted.
2640 There is nothing else to do. */
2643 if (state == MAP_ARM || state == MAP_THUMB)
2645 All ARM instructions require 4-byte alignment.
2646 (Almost) all Thumb instructions require 2-byte alignment.
2648 When emitting instructions into any section, mark the section
2651 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2652 but themselves require 2-byte alignment; this applies to some
2653 PC- relative forms. However, these cases will involve implicit
2654 literal pool generation or an explicit .align >=2, both of
2655 which will cause the section to be marked with sufficient
2656 alignment. Thus, we don't handle those cases here. */
2657 record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2659 if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2660 /* This case will be evaluated later in the next else. */
2662 else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2663 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2665 /* Only add the symbol if the offset is > 0:
2666 if we're at the first frag, check it's size > 0;
2667 if we're not at the first frag, then for sure
2668 the offset is > 0. */
2669 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2670 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
/* Code preceded by untracked bytes: mark those bytes as data.  */
2673 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2676 mapping_state_2 (state, 0);
2680 /* Same as mapping_state, but MAX_CHARS bytes have already been
2681 allocated. Put the mapping symbol that far back. */
2684 mapping_state_2 (enum mstate state, int max_chars)
2686 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
/* Only emit mapping symbols in normal (loaded) sections.  */
2688 if (!SEG_NORMAL (now_seg))
2691 if (mapstate == state)
2692 /* The mapping symbol has already been emitted.
2693 There is nothing else to do. */
2696 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2697 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now)
/* Non-ELF builds: mapping symbols are not needed, stub them out.  */
2700 #define mapping_state(x) ((void)0)
2701 #define mapping_state_2(x, y) ((void)0)
2704 /* Find the real, Thumb encoded start of a Thumb function. */
2708 find_real_start (symbolS * symbolP)
2711 const char * name = S_GET_NAME (symbolP);
2712 symbolS * new_target;
2714 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2715 #define STUB_NAME ".real_start_of"
2720 /* The compiler may generate BL instructions to local labels because
2721 it needs to perform a branch to a far away location. These labels
2722 do not have a corresponding ".real_start_of" label. We check
2723 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2724 the ".real_start_of" convention for nonlocal branches. */
2725 if (S_IS_LOCAL (symbolP) || name[0] == '.')
/* Build ".real_start_of<name>" and look it up.  */
2728 real_start = ACONCAT ((STUB_NAME, name, NULL));
2729 new_target = symbol_find (real_start);
/* Fall back to the original symbol if the stub does not exist.  */
2731 if (new_target == NULL)
2733 as_warn (_("Failed to find real start of function: %s\n"), name);
2734 new_target = symbolP;
/* Switch the assembler between ARM (WIDTH == 32) and Thumb
   (WIDTH == 16) instruction encoding.  */
2742 opcode_select (int width)
2749 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2750 as_bad (_("selected processor does not support THUMB opcodes"));
2753 /* No need to force the alignment, since we will have been
2754 coming from ARM mode, which is word-aligned. */
2755 record_alignment (now_seg, 1);
2762 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2763 as_bad (_("selected processor does not support ARM opcodes"));
/* Align to a word boundary when entering ARM mode.  */
2768 frag_align (2, 0, 0);
2770 record_alignment (now_seg, 1);
2775 as_bad (_("invalid instruction size selected (%d)"), width);
/* Handle the .arm directive: switch to 32-bit ARM encoding.  */
2780 s_arm (int ignore ATTRIBUTE_UNUSED)
2783 demand_empty_rest_of_line ();
/* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
2787 s_thumb (int ignore ATTRIBUTE_UNUSED)
2790 demand_empty_rest_of_line ();
/* Handle the .code directive: select 16- or 32-bit instructions.  */
2794 s_code (int unused ATTRIBUTE_UNUSED)
2798 temp = get_absolute_expression ();
2803 opcode_select (temp);
2807 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2812 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2814 /* If we are not already in thumb mode go into it, EVEN if
2815 the target processor does not support thumb instructions.
2816 This is used by gcc/config/arm/lib1funcs.asm for example
2817 to compile interworking support functions even if the
2818 target processor should not support interworking. */
2822 record_alignment (now_seg, 1);
2825 demand_empty_rest_of_line ();
2829 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2833 /* The following label is the name/address of the start of a Thumb function.
2834 We need to know this for the interworking support. */
2835 label_is_thumb_function_name = TRUE;
2838 /* Perform a .set directive, but also mark the alias as
2839 being a thumb function. */
2842 s_thumb_set (int equiv)
2844 /* XXX the following is a duplicate of the code for s_set() in read.c
2845 We cannot just call that code as we need to get at the symbol that
2852 /* Especial apologies for the random logic:
2853 This just grew, and could be parsed much more simply!
/* Isolate the symbol name before the '='/',' separator.  */
2855 name = input_line_pointer;
2856 delim = get_symbol_end ();
2857 end_name = input_line_pointer;
2860 if (*input_line_pointer != ',')
2863 as_bad (_("expected comma after name \"%s\""), name);
2865 ignore_rest_of_line ();
2869 input_line_pointer++;
2872 if (name[0] == '.' && name[1] == '\0')
2874 /* XXX - this should not happen to .thumb_set. */
/* Create the symbol if it does not exist yet.  */
2878 if ((symbolP = symbol_find (name)) == NULL
2879 && (symbolP = md_undefined_symbol (name)) == NULL)
2882 /* When doing symbol listings, play games with dummy fragments living
2883 outside the normal fragment chain to record the file and line info
2885 if (listing & LISTING_SYMBOLS)
2887 extern struct list_info_struct * listing_tail;
2888 fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2890 memset (dummy_frag, 0, sizeof (fragS));
2891 dummy_frag->fr_type = rs_fill;
2892 dummy_frag->line = listing_tail;
2893 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2894 dummy_frag->fr_symbol = symbolP;
2898 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2901 /* "set" symbols are local unless otherwise specified. */
2902 SF_SET_LOCAL (symbolP);
2903 #endif /* OBJ_COFF */
2904 } /* Make a new symbol. */
2906 symbol_table_insert (symbolP);
/* Reject redefinition of an already-defined non-register symbol.  */
2911 && S_IS_DEFINED (symbolP)
2912 && S_GET_SEGMENT (symbolP) != reg_section)
2913 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2915 pseudo_set (symbolP);
2917 demand_empty_rest_of_line ();
2919 /* XXX Now we come to the Thumb specific bit of code. */
/* Mark the alias as a Thumb function for interworking support.  */
2921 THUMB_SET_FUNC (symbolP, 1);
2922 ARM_SET_THUMB (symbolP, 1);
2923 #if defined OBJ_ELF || defined OBJ_COFF
2924 ARM_SET_INTERWORK (symbolP, support_interwork);
2928 /* Directives: Mode selection. */
2930 /* .syntax [unified|divided] - choose the new unified syntax
2931 (same for Arm and Thumb encoding, modulo slight differences in what
2932 can be represented) or the old divergent syntax for each mode. */
/* Handle the .syntax directive: select "unified" or "divided" ARM/Thumb
   assembly syntax by setting the global unified_syntax flag.  */
2934 s_syntax (int unused ATTRIBUTE_UNUSED)
2938 name = input_line_pointer;
2939 delim = get_symbol_end ();
2941 if (!strcasecmp (name, "unified"))
2942 unified_syntax = TRUE;
2943 else if (!strcasecmp (name, "divided"))
2944 unified_syntax = FALSE;
2947 as_bad (_("unrecognized syntax mode \"%s\""), name);
/* Restore the delimiter overwritten by get_symbol_end.  */
2950 *input_line_pointer = delim;
2951 demand_empty_rest_of_line ();
2954 /* Directives: sectioning and alignment. */
2956 /* Same as s_align_ptwo but align 0 => align 2. */
/* Handle .align: like s_align_ptwo, but (per the comment above) an
   alignment of 0 is treated as 2.  The alignment exponent is capped
   at 15; an optional second operand gives the fill value.
   NOTE(review): some lines are elided in this extraction.  */
2959 s_align (int unused ATTRIBUTE_UNUSED)
2964 long max_alignment = 15;
2966 temp = get_absolute_expression ();
2967 if (temp > max_alignment)
2968 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2971 as_bad (_("alignment negative. 0 assumed."));
/* Optional fill byte after a comma.  */
2975 if (*input_line_pointer == ',')
2977 input_line_pointer++;
2978 temp_fill = get_absolute_expression ();
2990 /* Only make a frag if we HAVE to. */
2991 if (temp && !need_pass_2)
/* In code sections with no explicit fill, pad with NOP-style fill.  */
2993 if (!fill_p && subseg_text_p (now_seg))
2994 frag_align_code (temp, 0);
2996 frag_align (temp, (int) temp_fill, 0);
2998 demand_empty_rest_of_line ();
3000 record_alignment (now_seg, temp);
/* Handle .bss: switch the current output section to bss.  */
3004 s_bss (int ignore ATTRIBUTE_UNUSED)
3006 /* We don't support putting frags in the BSS segment, we fake it by
3007 marking in_bss, then looking at s_skip for clues. */
3008 subseg_set (bss_section, 0);
3009 demand_empty_rest_of_line ();
3011 #ifdef md_elf_section_change_hook
3012 md_elf_section_change_hook ();
/* Handle .even: align the location counter to a 2-byte boundary.  */
3017 s_even (int ignore ATTRIBUTE_UNUSED)
3019 /* Never make frag if expect extra pass. */
3021 frag_align (1, 0, 0);
3023 record_alignment (now_seg, 1);
3025 demand_empty_rest_of_line ();
3028 /* Directives: CodeComposer Studio. */
3030 /* .ref (for CodeComposer Studio syntax only). */
/* Handle the CodeComposer Studio .ref directive: accepted (and its
   operands ignored) only when -mccs syntax is active.  */
3032 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3034 if (codecomposer_syntax)
3035 ignore_rest_of_line ();
3037 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3040 /* If name is not NULL, then it is used for marking the beginning of a
3041 function, whereas if it is NULL then it means the function end. */
/* Emit STABS debug info for a CCS asm function.  A non-NULL NAME marks
   the function start; NAME == NULL marks the end (using the name saved
   from the matching start call).  Some lines elided here.  */
3043 asmfunc_debug (const char * name)
3045 static const char * last_name = NULL;
/* Start: a previous function must have been closed first.  */
3049 gas_assert (last_name == NULL);
3052 if (debug_type == DEBUG_STABS)
3053 stabs_generate_asm_func (name, name);
/* End: a start must have been recorded.  */
3057 gas_assert (last_name != NULL);
3059 if (debug_type == DEBUG_STABS)
3060 stabs_generate_asm_endfunc (last_name, last_name);
/* Handle the CCS .asmfunc directive: advance the asmfunc state machine
   to "waiting for the function-name label"; diagnose misuse.  */
3067 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3069 if (codecomposer_syntax)
3071 switch (asmfunc_state)
3073 case OUTSIDE_ASMFUNC:
3074 asmfunc_state = WAITING_ASMFUNC_NAME;
3077 case WAITING_ASMFUNC_NAME:
3078 as_bad (_(".asmfunc repeated."));
3081 case WAITING_ENDASMFUNC:
3082 as_bad (_(".asmfunc without function."));
3085 demand_empty_rest_of_line ();
3088 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
/* Handle the CCS .endasmfunc directive: close the current asm function
   (emitting end-of-function debug info) and reset the state machine.  */
3092 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3094 if (codecomposer_syntax)
3096 switch (asmfunc_state)
3098 case OUTSIDE_ASMFUNC:
3099 as_bad (_(".endasmfunc without a .asmfunc."));
3102 case WAITING_ASMFUNC_NAME:
3103 as_bad (_(".endasmfunc without function."));
3106 case WAITING_ENDASMFUNC:
3107 asmfunc_state = OUTSIDE_ASMFUNC;
3108 asmfunc_debug (NULL);
3111 demand_empty_rest_of_line ();
3114 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
/* Handle the CCS .def directive: only valid with -mccs.
   NOTE(review): the accepting branch is elided in this extraction --
   presumably it forwards to the generic s_globl/s_def handling.  */
3118 s_ccs_def (int name)
3120 if (codecomposer_syntax)
3123 as_bad (_(".def pseudo-op only available with -mccs flag."));
3126 /* Directives: Literal pools. */
/* Return the literal pool associated with the current (section,
   subsection) pair, or NULL if none exists yet.  */
3128 static literal_pool *
3129 find_literal_pool (void)
3131 literal_pool * pool;
3133 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3135 if (pool->section == now_seg
3136 && pool->sub_section == now_subseg)
/* Return the literal pool for the current section/subsection, creating
   and initialising a new one if necessary.  New (or emptied) pools get
   a fresh fake label symbol and a unique id.  */
3143 static literal_pool *
3144 find_or_make_literal_pool (void)
3146 /* Next literal pool ID number. */
3147 static unsigned int latest_pool_num = 1;
3148 literal_pool * pool;
3150 pool = find_literal_pool ();
3154 /* Create a new pool. */
3155 pool = (literal_pool *) xmalloc (sizeof (* pool));
3159 pool->next_free_entry = 0;
3160 pool->section = now_seg;
3161 pool->sub_section = now_subseg;
3162 pool->next = list_of_pools;
3163 pool->symbol = NULL;
3164 pool->alignment = 2;
3166 /* Add it to the list. */
3167 list_of_pools = pool;
3170 /* New pools, and emptied pools, will have a NULL symbol. */
3171 if (pool->symbol == NULL)
3173 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3174 (valueT) 0, &zero_address_frag)
3175 pool->id = latest_pool_num ++;
3182 /* Add the literal in the global 'inst'
3183 structure to the relevant literal pool. */
/* Add the literal described by the global 'inst' structure (its reloc
   expression, NBYTES wide) to the current section's literal pool,
   reusing an existing identical entry when possible.  8-byte literals
   are stored as two 4-byte halves (imm1/imm2, byte-order dependent).
   On success, rewrites inst.reloc.exp into an O_symbol reference to
   the pool symbol at the entry's byte offset.
   NOTE(review): several lines are elided in this extraction, so the
   control flow below is not complete.  */
3186 add_to_lit_pool (unsigned int nbytes)
3188 #define PADDING_SLOT 0x1
3189 #define LIT_ENTRY_SIZE_MASK 0xFF
3190 literal_pool * pool;
3191 unsigned int entry, pool_size = 0;
3192 bfd_boolean padding_slot_p = FALSE;
/* Split a 64-bit literal into low (imm1) and high (imm2) words; the
   assignment under target_big_endian swaps which operand supplies
   the first-stored word.  */
3198 imm1 = inst.operands[1].imm;
3199 imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3200 : inst.reloc.exp.X_unsigned ? 0
3201 : ((int64_t)(imm1)) >> 32);
3202 if (target_big_endian)
3205 imm2 = inst.operands[1].imm;
3209 pool = find_or_make_literal_pool ();
3211 /* Check if this literal value is already in the pool. */
3212 for (entry = 0; entry < pool->next_free_entry; entry ++)
/* Match an existing constant entry of the same width/signedness.  */
3216 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3217 && (inst.reloc.exp.X_op == O_constant)
3218 && (pool->literals[entry].X_add_number
3219 == inst.reloc.exp.X_add_number)
3220 && (pool->literals[entry].X_md == nbytes)
3221 && (pool->literals[entry].X_unsigned
3222 == inst.reloc.exp.X_unsigned))
/* Match an existing symbolic entry (same symbol/op/addend/width).  */
3225 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3226 && (inst.reloc.exp.X_op == O_symbol)
3227 && (pool->literals[entry].X_add_number
3228 == inst.reloc.exp.X_add_number)
3229 && (pool->literals[entry].X_add_symbol
3230 == inst.reloc.exp.X_add_symbol)
3231 && (pool->literals[entry].X_op_symbol
3232 == inst.reloc.exp.X_op_symbol)
3233 && (pool->literals[entry].X_md == nbytes))
/* Match an 8-byte value stored as two consecutive 4-byte entries on
   an 8-byte-aligned offset within the pool.  */
3236 else if ((nbytes == 8)
3237 && !(pool_size & 0x7)
3238 && ((entry + 1) != pool->next_free_entry)
3239 && (pool->literals[entry].X_op == O_constant)
3240 && (pool->literals[entry].X_add_number == imm1)
3241 && (pool->literals[entry].X_unsigned
3242 == inst.reloc.exp.X_unsigned)
3243 && (pool->literals[entry + 1].X_op == O_constant)
3244 && (pool->literals[entry + 1].X_add_number == imm2)
3245 && (pool->literals[entry + 1].X_unsigned
3246 == inst.reloc.exp.X_unsigned))
/* A padding slot (upper byte of X_md) may be recycled for a 4-byte
   literal instead of growing the pool.  */
3249 padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3250 if (padding_slot_p && (nbytes == 4))
3256 /* Do we need to create a new entry? */
3257 if (entry == pool->next_free_entry)
3259 if (entry >= MAX_LITERAL_POOL_SIZE)
3261 inst.error = _("literal pool overflow");
3267 /* For 8-byte entries, we align to an 8-byte boundary,
3268 and split it into two 4-byte entries, because on 32-bit
3269 host, 8-byte constants are treated as big num, thus
3270 saved in "generic_bignum" which will be overwritten
3271 by later assignments.
3273 We also need to make sure there is enough space for
3276 We also check to make sure the literal operand is a
3278 if (!(inst.reloc.exp.X_op == O_constant)
3279 || (inst.reloc.exp.X_op == O_big))
3281 inst.error = _("invalid type for literal pool");
/* Pool currently misaligned for an 8-byte value: insert a 4-byte
   padding slot first (marked via PADDING_SLOT in X_md's high byte).  */
3284 else if (pool_size & 0x7)
3286 if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3288 inst.error = _("literal pool overflow");
3292 pool->literals[entry] = inst.reloc.exp;
3293 pool->literals[entry].X_add_number = 0;
3294 pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3295 pool->next_free_entry += 1;
3298 else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3300 inst.error = _("literal pool overflow");
/* Store the 64-bit value as two 4-byte constant entries and bump the
   pool's required alignment to 8 bytes (2^3).  */
3304 pool->literals[entry] = inst.reloc.exp;
3305 pool->literals[entry].X_op = O_constant;
3306 pool->literals[entry].X_add_number = imm1;
3307 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3308 pool->literals[entry++].X_md = 4;
3309 pool->literals[entry] = inst.reloc.exp;
3310 pool->literals[entry].X_op = O_constant;
3311 pool->literals[entry].X_add_number = imm2;
3312 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3313 pool->literals[entry].X_md = 4;
3314 pool->alignment = 3;
3315 pool->next_free_entry += 1;
/* Simple (non-8-byte) case: append the expression as-is.  */
3319 pool->literals[entry] = inst.reloc.exp;
3320 pool->literals[entry].X_md = 4;
3324 /* PR ld/12974: Record the location of the first source line to reference
3325 this entry in the literal pool. If it turns out during linking that the
3326 symbol does not exist we will be able to give an accurate line number for
3327 the (first use of the) missing reference. */
3328 if (debug_type == DEBUG_DWARF2)
3329 dwarf2_where (pool->locs + entry);
3331 pool->next_free_entry += 1;
3333 else if (padding_slot_p)
/* Overwrite a recyclable padding slot with the new literal.  */
3335 pool->literals[entry] = inst.reloc.exp;
3336 pool->literals[entry].X_md = nbytes;
/* Rewrite the instruction's reloc to reference pool_symbol+offset.  */
3339 inst.reloc.exp.X_op = O_symbol;
3340 inst.reloc.exp.X_add_number = pool_size;
3341 inst.reloc.exp.X_add_symbol = pool->symbol;
/* CCS syntax: decide whether a colon-less label is acceptable here.
   While waiting for a .asmfunc name, validate the label, emit debug
   info for it and advance the asmfunc state machine.  Returns TRUE on
   success.  Some lines elided here.  */
3347 tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED, const char * rest)
3349 bfd_boolean ret = TRUE;
3351 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3353 const char *label = rest;
/* Scan back to the start of the line to recover the label text.  */
3355 while (!is_end_of_line[(int) label[-1]])
3360 as_bad (_("Invalid label '%s'"), label);
3364 asmfunc_debug (label);
3366 asmfunc_state = WAITING_ENDASMFUNC;
3372 /* Can't use symbol_new here, so have to create a symbol and then at
3373 a later date assign it a value. That's what these functions do. */
/* Give a previously-created symbol its real name, segment, value and
   fragment, then link it onto the end of the symbol chain (see the
   comment above: symbol_new cannot be used for pool symbols, which
   are created first and located later).  */
3376 symbol_locate (symbolS * symbolP,
3377 const char * name, /* It is copied, the caller can modify. */
3378 segT segment, /* Segment identifier (SEG_<something>). */
3379 valueT valu, /* Symbol value. */
3380 fragS * frag) /* Associated fragment. */
3382 unsigned int name_length;
3383 char * preserved_copy_of_name;
/* Copy the name onto the notes obstack so the caller may reuse it.  */
3385 name_length = strlen (name) + 1; /* +1 for \0. */
3386 obstack_grow (¬es, name, name_length);
3387 preserved_copy_of_name = (char *) obstack_finish (¬es);
3389 #ifdef tc_canonicalize_symbol_name
3390 preserved_copy_of_name =
3391 tc_canonicalize_symbol_name (preserved_copy_of_name);
3394 S_SET_NAME (symbolP, preserved_copy_of_name);
3396 S_SET_SEGMENT (symbolP, segment);
3397 S_SET_VALUE (symbolP, valu);
3398 symbol_clear_list_pointers (symbolP);
3400 symbol_set_frag (symbolP, frag);
3402 /* Link to end of symbol chain. */
3404 extern int symbol_table_frozen;
3406 if (symbol_table_frozen)
3410 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3412 obj_symbol_new_hook (symbolP);
3414 #ifdef tc_symbol_new_hook
3415 tc_symbol_new_hook (symbolP);
3419 verify_symbol_chain (symbol_rootP, symbol_lastP);
3420 #endif /* DEBUG_SYMS */
/* Handle .ltorg: dump the current section's literal pool at this point.
   Aligns the pool, locates the pool symbol here, emits every recorded
   literal expression, and then marks the pool empty for reuse.  */
3424 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3427 literal_pool * pool;
3430 pool = find_literal_pool ();
/* Nothing to do if there is no pool, or it has no symbol/entries.  */
3432 || pool->symbol == NULL
3433 || pool->next_free_entry == 0)
3436 /* Align pool as you have word accesses.
3437 Only make a frag if we have to. */
3439 frag_align (pool->alignment, 0, 0);
3441 record_alignment (now_seg, 2);
/* Switch the mapping state to data and emit a $d mapping symbol.  */
3444 seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
3445 make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
3447 sprintf (sym_name, "$$lit_\002%x", pool->id);
3449 symbol_locate (pool->symbol, sym_name, now_seg,
3450 (valueT) frag_now_fix (), frag_now);
3451 symbol_table_insert (pool->symbol);
3453 ARM_SET_THUMB (pool->symbol, thumb_mode);
3455 #if defined OBJ_COFF || defined OBJ_ELF
3456 ARM_SET_INTERWORK (pool->symbol, support_interwork);
3459 for (entry = 0; entry < pool->next_free_entry; entry ++)
/* Attribute each literal to the line that first referenced it
   (see PR ld/12974 note in add_to_lit_pool).  */
3462 if (debug_type == DEBUG_DWARF2)
3463 dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3465 /* First output the expression in the instruction to the pool. */
3466 emit_expr (&(pool->literals[entry]),
3467 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
3470 /* Mark the pool as empty. */
3471 pool->next_free_entry = 0;
3472 pool->symbol = NULL;
3476 /* Forward declarations for functions below, in the MD interface
3478 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3479 static valueT create_unwind_entry (int);
3480 static void start_unwind_section (const segT, int);
3481 static void add_unwind_opcode (valueT, int);
3482 static void flush_pending_unwind (void);
3484 /* Directives: Data. */
/* Handle data pseudo-ops (.word etc.) of NBYTES each, accepting an
   optional relocation suffix such as (plt) after symbolic operands.
   Plain expressions are emitted directly; suffixed ones generate an
   explicit fixup of the relocation's size placed at the correct
   offset inside the NBYTES field.
   NOTE(review): some lines are elided in this extraction.  */
3487 s_arm_elf_cons (int nbytes)
3491 #ifdef md_flush_pending_output
3492 md_flush_pending_output ();
3495 if (is_it_end_of_statement ())
3497 demand_empty_rest_of_line ();
3501 #ifdef md_cons_align
3502 md_cons_align (nbytes);
3505 mapping_state (MAP_DATA);
/* Parse one comma-separated expression per loop iteration.  */
3509 char *base = input_line_pointer;
3513 if (exp.X_op != O_symbol)
3514 emit_expr (&exp, (unsigned int) nbytes);
3517 char *before_reloc = input_line_pointer;
3518 reloc = parse_reloc (&input_line_pointer);
3521 as_bad (_("unrecognized relocation suffix"));
3522 ignore_rest_of_line ();
3525 else if (reloc == BFD_RELOC_UNUSED)
3526 emit_expr (&exp, (unsigned int) nbytes);
3529 reloc_howto_type *howto = (reloc_howto_type *)
3530 bfd_reloc_type_lookup (stdoutput,
3531 (bfd_reloc_code_real_type) reloc);
3532 int size = bfd_get_reloc_size (howto);
3534 if (reloc == BFD_RELOC_ARM_PLT32)
3536 as_bad (_("(plt) is only valid on branch targets"));
3537 reloc = BFD_RELOC_UNUSED;
3542 as_bad (_("%s relocations do not fit in %d bytes"),
3543 howto->name, nbytes);
3546 /* We've parsed an expression stopping at O_symbol.
3547 But there may be more expression left now that we
3548 have parsed the relocation marker. Parse it again.
3549 XXX Surely there is a cleaner way to do this. */
3550 char *p = input_line_pointer;
/* Splice the reloc suffix out of the input buffer so the whole
   expression can be re-parsed without it.  */
3552 char *save_buf = (char *) alloca (input_line_pointer - base);
3553 memcpy (save_buf, base, input_line_pointer - base);
3554 memmove (base + (input_line_pointer - before_reloc),
3555 base, before_reloc - base);
3557 input_line_pointer = base + (input_line_pointer-before_reloc);
3559 memcpy (base, save_buf, p - base);
/* Place the fixup in the least-significant SIZE bytes of the
   NBYTES field (offset accounts for the difference).  */
3561 offset = nbytes - size;
3562 p = frag_more (nbytes);
3563 memset (p, 0, nbytes);
3564 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3565 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3570 while (*input_line_pointer++ == ',');
3572 /* Put terminator back into stream. */
3573 input_line_pointer --;
3574 demand_empty_rest_of_line ();
3577 /* Emit an expression containing a 32-bit thumb instruction.
3578 Implementation based on put_thumb32_insn. */
/* Emit a 32-bit Thumb instruction value as two 16-bit halfwords,
   high halfword first (mirrors put_thumb32_insn, per the comment
   above).  EXP's value is consumed/modified in the process.  */
3581 emit_thumb32_expr (expressionS * exp)
3583 expressionS exp_high = *exp;
3585 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3586 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3587 exp->X_add_number &= 0xffff;
3588 emit_expr (exp, (unsigned int) THUMB_SIZE);
3591 /* Guess the instruction size based on the opcode. */
/* Guess a Thumb instruction's size (in bytes) from its encoding:
   values below 0xe800 are 16-bit; values at or above 0xe8000000 are
   32-bit.  NOTE(review): the return statements and the in-between
   (ambiguous) case are elided in this extraction.  */
3594 thumb_insn_size (int opcode)
3596 if ((unsigned int) opcode < 0xe800u)
3598 else if ((unsigned int) opcode >= 0xe8000000u)
/* Emit a raw instruction for .inst/.inst.n/.inst.w.  NBYTES gives the
   explicit width (0 lets thumb_insn_size guess from the opcode).  The
   operand must be a constant expression.  Returns success/failure
   (exact return handling elided in this extraction).  */
3605 emit_insn (expressionS *exp, int nbytes)
3609 if (exp->X_op == O_constant)
/* No explicit width: infer it from the opcode value.  */
3614 size = thumb_insn_size (exp->X_add_number);
3618 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3620 as_bad (_(".inst.n operand too big. "\
3621 "Use .inst.w instead"));
/* Keep the IT-block state machine consistent with the raw insn.  */
3626 if (now_it.state == AUTOMATIC_IT_BLOCK)
3627 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3629 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
/* 32-bit Thumb insns on little-endian targets need halfword-swapped
   emission (high halfword first).  */
3631 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3632 emit_thumb32_expr (exp);
3634 emit_expr (exp, (unsigned int) size);
3636 it_fsm_post_encode ();
3640 as_bad (_("cannot determine Thumb instruction size. " \
3641 "Use .inst.n/.inst.w instead"));
3644 as_bad (_("constant expression required"));
3649 /* Like s_arm_elf_cons but do not use md_cons_align and
3650 set the mapping state to MAP_ARM/MAP_THUMB. */
/* Handle .inst/.inst.n/.inst.w: emit raw instruction words.  Like
   s_arm_elf_cons but without md_cons_align, and the mapping state is
   set to MAP_ARM/MAP_THUMB rather than MAP_DATA (see comment above).
   Some lines elided here.  */
3653 s_arm_elf_inst (int nbytes)
3655 if (is_it_end_of_statement ())
3657 demand_empty_rest_of_line ();
3661 /* Calling mapping_state () here will not change ARM/THUMB,
3662 but will ensure not to be in DATA state. */
3665 mapping_state (MAP_THUMB);
/* In ARM mode the .n/.w width suffixes are meaningless.  */
3670 as_bad (_("width suffixes are invalid in ARM mode"));
3671 ignore_rest_of_line ();
3677 mapping_state (MAP_ARM);
3686 if (! emit_insn (& exp, nbytes))
3688 ignore_rest_of_line ();
3692 while (*input_line_pointer++ == ',');
3694 /* Put terminator back into stream. */
3695 input_line_pointer --;
3696 demand_empty_rest_of_line ();
3699 /* Parse a .rel31 directive. */
/* Handle .rel31: emit a 4-byte field whose low 31 bits get a
   BFD_RELOC_ARM_PREL31 relocation against the given expression and
   whose top bit is the literal 0/1 given as the first operand.
   Some lines elided here.  */
3702 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3709 if (*input_line_pointer == '1')
3710 highbit = 0x80000000;
3711 else if (*input_line_pointer != '0')
3712 as_bad (_("expected 0 or 1"));
3714 input_line_pointer++;
3715 if (*input_line_pointer != ',')
3716 as_bad (_("missing comma"));
3717 input_line_pointer++;
3719 #ifdef md_flush_pending_output
3720 md_flush_pending_output ();
3723 #ifdef md_cons_align
3727 mapping_state (MAP_DATA);
/* Pre-fill the word with the high bit; the PREL31 fixup supplies
   the low 31 bits at write-out time.  */
3732 md_number_to_chars (p, highbit, 4);
3733 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3734 BFD_RELOC_ARM_PREL31);
3736 demand_empty_rest_of_line ();
3739 /* Directives: AEABI stack-unwind tables. */
3741 /* Parse an unwind_fnstart directive. Simply records the current location. */
/* Handle .fnstart: record the current location as the start of an
   unwound function and reset all per-function unwind state.
   Nested/duplicate .fnstart is diagnosed.  */
3744 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3746 demand_empty_rest_of_line ();
3747 if (unwind.proc_start)
3749 as_bad (_("duplicate .fnstart directive"));
3753 /* Mark the start of the function. */
3754 unwind.proc_start = expr_build_dot ();
3756 /* Reset the rest of the unwind info. */
3757 unwind.opcode_count = 0;
3758 unwind.table_entry = NULL;
3759 unwind.personality_routine = NULL;
3760 unwind.personality_index = -1;
3761 unwind.frame_size = 0;
3762 unwind.fp_offset = 0;
3763 unwind.fp_reg = REG_SP;
3765 unwind.sp_restored = 0;
3769 /* Parse a handlerdata directive. Creates the exception handling table entry
3770 for the function. */
/* Handle .handlerdata: force creation of the exception-handling table
   entry for the current function so handler data can follow it.  */
3773 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3775 demand_empty_rest_of_line ();
3776 if (!unwind.proc_start)
3777 as_bad (MISSING_FNSTART);
3779 if (unwind.table_entry)
3780 as_bad (_("duplicate .handlerdata directive"));
3782 create_unwind_entry (1);
3785 /* Parse an unwind_fnend directive. Generates the index table entry. */
/* Handle .fnend: finish the current function's unwind information.
   Creates the table entry if .handlerdata didn't, then emits the
   two-word exception index table entry (PREL31 offset to the function
   start, plus either an inline entry or a PREL31 offset to the table
   entry) and records any dependency on the EHABI pr0-pr2 personality
   routines.  Some lines elided here.  */
3788 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3793 unsigned int marked_pr_dependency;
3795 demand_empty_rest_of_line ();
3797 if (!unwind.proc_start)
3799 as_bad (_(".fnend directive without .fnstart"));
3803 /* Add eh table entry. */
3804 if (unwind.table_entry == NULL)
3805 val = create_unwind_entry (0);
3809 /* Add index table entry. This is two words. */
3810 start_unwind_section (unwind.saved_seg, 1);
3811 frag_align (2, 0, 0);
3812 record_alignment (now_seg, 2);
3814 ptr = frag_more (8);
3816 where = frag_now_fix () - 8;
3818 /* Self relative offset of the function start. */
3819 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3820 BFD_RELOC_ARM_PREL31);
3822 /* Indicate dependency on EHABI-defined personality routines to the
3823 linker, if it hasn't been done already. */
3824 marked_pr_dependency
3825 = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3826 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3827 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3829 static const char *const name[] =
3831 "__aeabi_unwind_cpp_pr0",
3832 "__aeabi_unwind_cpp_pr1",
3833 "__aeabi_unwind_cpp_pr2"
/* A zero-size BFD_RELOC_NONE fixup just records the reference.  */
3835 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3836 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3837 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3838 |= 1 << unwind.personality_index;
3842 /* Inline exception table entry. */
3843 md_number_to_chars (ptr + 4, val, 4);
3845 /* Self relative offset of the table entry. */
3846 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3847 BFD_RELOC_ARM_PREL31);
3849 /* Restore the original section. */
3850 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3852 unwind.proc_start = NULL;
3856 /* Parse an unwind_cantunwind directive. */
/* Handle .cantunwind: mark the current frame as not unwindable
   (personality_index -2).  Incompatible with an explicit personality.  */
3859 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3861 demand_empty_rest_of_line ();
3862 if (!unwind.proc_start)
3863 as_bad (MISSING_FNSTART);
3865 if (unwind.personality_routine || unwind.personality_index != -1)
3866 as_bad (_("personality routine specified for cantunwind frame"));
3868 unwind.personality_index = -2;
3872 /* Parse a personalityindex directive. */
/* Handle .personalityindex: select one of the predefined personality
   routines by number (constant in [0,15]).  */
3875 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3879 if (!unwind.proc_start)
3880 as_bad (MISSING_FNSTART);
3882 if (unwind.personality_routine || unwind.personality_index != -1)
3883 as_bad (_("duplicate .personalityindex directive"));
3887 if (exp.X_op != O_constant
3888 || exp.X_add_number < 0 || exp.X_add_number > 15)
3890 as_bad (_("bad personality routine number"))
3891 ignore_rest_of_line ();
3895 unwind.personality_index = exp.X_add_number;
3897 demand_empty_rest_of_line ();
3901 /* Parse a personality directive. */
/* Handle .personality: record a named personality routine symbol for
   the current function's unwind entry.  */
3904 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3908 if (!unwind.proc_start)
3909 as_bad (MISSING_FNSTART);
3911 if (unwind.personality_routine || unwind.personality_index != -1)
3912 as_bad (_("duplicate .personality directive"));
3914 name = input_line_pointer;
3915 c = get_symbol_end ();
3916 p = input_line_pointer;
3917 unwind.personality_routine = symbol_find_or_make (name);
3919 demand_empty_rest_of_line ();
3923 /* Parse a directive saving core registers. */
/* Handle .unwind_save for a core register list: emit the compact
   pop-r4..r11/r14 opcode when the list allows it, otherwise the long
   two-byte form; r0-r3 are saved with a separate 0xb1xx opcode.  Also
   folds a preceding .movsp ip back into the save of sp (see comment).
   Some lines elided here.  */
3926 s_arm_unwind_save_core (void)
3932 range = parse_reg_list (&input_line_pointer);
3935 as_bad (_("expected register list"));
3936 ignore_rest_of_line ();
3940 demand_empty_rest_of_line ();
3942 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3943 into .unwind_save {..., sp...}. We aren't bothered about the value of
3944 ip because it is clobbered by calls. */
3945 if (unwind.sp_restored && unwind.fp_reg == 12
3946 && (range & 0x3000) == 0x1000)
3948 unwind.opcode_count--;
3949 unwind.sp_restored = 0;
3950 range = (range | 0x2000) & ~0x1000;
3951 unwind.pending_offset = 0;
3957 /* See if we can use the short opcodes. These pop a block of up to 8
3958 registers starting with r4, plus maybe r14. */
3959 for (n = 0; n < 8; n++)
3961 /* Break at the first non-saved register. */
3962 if ((range & (1 << (n + 4))) == 0)
3965 /* See if there are any other bits set. */
3966 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3968 /* Use the long form. */
3969 op = 0x8000 | ((range >> 4) & 0xfff);
3970 add_unwind_opcode (op, 2);
3974 /* Use the short form. */
3976 op = 0xa8; /* Pop r14. */
3978 op = 0xa0; /* Do not pop r14. */
3980 add_unwind_opcode (op, 1);
/* Save the low registers r0-r3, if any (opcode 0xb1xx).  */
3987 op = 0xb100 | (range & 0xf);
3988 add_unwind_opcode (op, 2);
3991 /* Record the number of bytes pushed. */
3992 for (n = 0; n < 16; n++)
3994 if (range & (1 << n))
3995 unwind.frame_size += 4;
4000 /* Parse a directive saving FPA registers. */
/* Handle .unwind_save for FPA registers: "reg, count" with count in
   [1,4].  Each FPA register occupies 12 bytes of frame.  Some lines
   elided here (including the short-form condition).  */
4003 s_arm_unwind_save_fpa (int reg)
4009 /* Get Number of registers to transfer. */
4010 if (skip_past_comma (&input_line_pointer) != FAIL)
4013 exp.X_op = O_illegal;
4015 if (exp.X_op != O_constant)
4017 as_bad (_("expected , <constant>"));
4018 ignore_rest_of_line ();
4022 num_regs = exp.X_add_number;
4024 if (num_regs < 1 || num_regs > 4)
4026 as_bad (_("number of registers must be in the range [1:4]"));
4027 ignore_rest_of_line ();
4031 demand_empty_rest_of_line ();
/* Short one-byte form.  */
4036 op = 0xb4 | (num_regs - 1);
4037 add_unwind_opcode (op, 1);
/* Long two-byte form encoding the base register.  */
4042 op = 0xc800 | (reg << 4) | (num_regs - 1);
4043 add_unwind_opcode (op, 2);
4045 unwind.frame_size += num_regs * 12;
4049 /* Parse a directive saving VFP registers for ARMv6 and above. */
/* Handle .vsave (ARMv6+ VFP save): emit FSTMD/FLDMD-style unwind
   opcodes, splitting the list into a D16-D31 (VFPv3) part (0xc8xx)
   and a D0-D15 part (0xc9xx).  Each D register is 8 frame bytes.
   Some lines elided here.  */
4052 s_arm_unwind_save_vfp_armv6 (void)
4057 int num_vfpv3_regs = 0;
4058 int num_regs_below_16;
4060 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
4063 as_bad (_("expected register list"));
4064 ignore_rest_of_line ();
4068 demand_empty_rest_of_line ();
4070 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4071 than FSTMX/FLDMX-style ones). */
4073 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4075 num_vfpv3_regs = count;
4076 else if (start + count > 16)
4077 num_vfpv3_regs = start + count - 16;
4079 if (num_vfpv3_regs > 0)
4081 int start_offset = start > 16 ? start - 16 : 0;
4082 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
4083 add_unwind_opcode (op, 2);
4086 /* Generate opcode for registers numbered in the range 0 .. 15. */
4087 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
4088 gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
4089 if (num_regs_below_16 > 0)
4091 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
4092 add_unwind_opcode (op, 2);
4095 unwind.frame_size += count * 8;
4099 /* Parse a directive saving VFP registers for pre-ARMv6. */
/* Handle .unwind_save for pre-ARMv6 VFP registers (FSTMX-style):
   short 0xb8 form or long 0xb3xx form; the extra 4 bytes in the
   frame-size accounting cover the FSTMX format word.  Some lines
   elided here.  */
4102 s_arm_unwind_save_vfp (void)
4108 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D);
4111 as_bad (_("expected register list"));
4112 ignore_rest_of_line ();
4116 demand_empty_rest_of_line ();
/* Short one-byte form.  */
4121 op = 0xb8 | (count - 1);
4122 add_unwind_opcode (op, 1);
/* Long two-byte form with explicit base register.  */
4127 op = 0xb300 | (reg << 4) | (count - 1);
4128 add_unwind_opcode (op, 2);
4130 unwind.frame_size += count * 8 + 4;
4134 /* Parse a directive saving iWMMXt data registers. */
/* Handle .unwind_save for iWMMXt data (wR) registers.  Parses a
   {reg[-reg], ...} list into a bitmask, tries to merge with the
   previously emitted wR save opcode (gcc emits one directive per
   register), then emits save opcodes for contiguous blocks in
   descending register order.  Each wR register is 8 frame bytes.
   NOTE(review): many lines are elided in this extraction.  */
4137 s_arm_unwind_save_mmxwr (void)
4145 if (*input_line_pointer == '{')
4146 input_line_pointer++;
4150 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4154 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4159 as_tsktsk (_("register list not in ascending order"));
/* Optional "lo-hi" register range.  */
4162 if (*input_line_pointer == '-')
4164 input_line_pointer++;
4165 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4168 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4171 else if (reg >= hi_reg)
4173 as_bad (_("bad register range"));
4176 for (; reg < hi_reg; reg++)
4180 while (skip_past_comma (&input_line_pointer) != FAIL);
4182 skip_past_char (&input_line_pointer, '}');
4184 demand_empty_rest_of_line ();
4186 /* Generate any deferred opcodes because we're going to be looking at
4188 flush_pending_unwind ();
4190 for (i = 0; i < 16; i++)
4192 if (mask & (1 << i))
4193 unwind.frame_size += 8;
4196 /* Attempt to combine with a previous opcode. We do this because gcc
4197 likes to output separate unwind directives for a single block of
4199 if (unwind.opcode_count > 0)
4201 i = unwind.opcodes[unwind.opcode_count - 1];
4202 if ((i & 0xf8) == 0xc0)
4205 /* Only merge if the blocks are contiguous. */
4208 if ((mask & 0xfe00) == (1 << 9))
4210 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4211 unwind.opcode_count--;
4214 else if (i == 6 && unwind.opcode_count >= 2)
4216 i = unwind.opcodes[unwind.opcode_count - 2];
4220 op = 0xffff << (reg - 1);
4222 && ((mask & op) == (1u << (reg - 1))))
4224 op = (1 << (reg + i + 1)) - 1;
4225 op &= ~((1 << reg) - 1);
4227 unwind.opcode_count -= 2;
4234 /* We want to generate opcodes in the order the registers have been
4235 saved, ie. descending order. */
4236 for (reg = 15; reg >= -1; reg--)
4238 /* Save registers in blocks. */
4240 || !(mask & (1 << reg)))
4242 /* We found an unsaved reg. Generate opcodes to save the
/* Short form for a block ending at wR10+ (0xc0 | count).  */
4249 op = 0xc0 | (hi_reg - 10);
4250 add_unwind_opcode (op, 1);
/* Long form with explicit start register and count.  */
4255 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4256 add_unwind_opcode (op, 2);
4265 ignore_rest_of_line ();
/* Handle .unwind_save for iWMMXt control (wCG) registers: parse a
   {reg[-reg], ...} list into a mask and emit a single two-byte save
   opcode.  Each register is 4 frame bytes.  Some lines elided here.  */
4269 s_arm_unwind_save_mmxwcg (void)
4276 if (*input_line_pointer == '{')
4277 input_line_pointer++;
4279 skip_whitespace (input_line_pointer);
4283 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4287 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4293 as_tsktsk (_("register list not in ascending order"));
/* Optional "lo-hi" register range.  */
4296 if (*input_line_pointer == '-')
4298 input_line_pointer++;
4299 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4302 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4305 else if (reg >= hi_reg)
4307 as_bad (_("bad register range"));
4310 for (; reg < hi_reg; reg++)
4314 while (skip_past_comma (&input_line_pointer) != FAIL);
4316 skip_past_char (&input_line_pointer, '}');
4318 demand_empty_rest_of_line ();
4320 /* Generate any deferred opcodes because we're going to be looking at
4322 flush_pending_unwind ();
4324 for (reg = 0; reg < 16; reg++)
4326 if (mask & (1 << reg))
4327 unwind.frame_size += 4;
4330 add_unwind_opcode (op, 2);
4333 ignore_rest_of_line ();
4337 /* Parse an unwind_save directive.
4338 If the argument is non-zero, this is a .vsave directive. */
/* Handle .unwind_save / .vsave (ARCH_V6 non-zero for .vsave): peek at
   the first register to decide which register class is being saved and
   dispatch to the appropriate helper.  Some lines elided here.  */
4341 s_arm_unwind_save (int arch_v6)
4344 struct reg_entry *reg;
4345 bfd_boolean had_brace = FALSE;
4347 if (!unwind.proc_start)
4348 as_bad (MISSING_FNSTART);
4350 /* Figure out what sort of save we have. */
4351 peek = input_line_pointer;
4359 reg = arm_reg_parse_multi (&peek);
4363 as_bad (_("register expected"));
4364 ignore_rest_of_line ();
/* FPA registers take "reg, count", never a brace list.  */
4373 as_bad (_("FPA .unwind_save does not take a register list"));
4374 ignore_rest_of_line ();
4377 input_line_pointer = peek;
4378 s_arm_unwind_save_fpa (reg->number);
4382 s_arm_unwind_save_core ();
/* VFP D registers: ARMv6+ (.vsave) vs. legacy encoding.  */
4387 s_arm_unwind_save_vfp_armv6 ();
4389 s_arm_unwind_save_vfp ();
4392 case REG_TYPE_MMXWR:
4393 s_arm_unwind_save_mmxwr ();
4396 case REG_TYPE_MMXWCG:
4397 s_arm_unwind_save_mmxwcg ();
4401 as_bad (_(".unwind_save does not support this kind of register"));
4402 ignore_rest_of_line ();
4407 /* Parse an unwind_movsp directive. */
/* Handle .movsp: note that sp is restored from REG (optionally with an
   offset), emit the corresponding unwind opcode, and remember reg and
   offset for later sp-from-fp restoration.  SP and PC are rejected.
   Some lines elided here.  */
4410 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4416 if (!unwind.proc_start)
4417 as_bad (MISSING_FNSTART);
4419 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4422 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4423 ignore_rest_of_line ();
4427 /* Optional constant. */
4428 if (skip_past_comma (&input_line_pointer) != FAIL)
4430 if (immediate_for_directive (&offset) == FAIL)
4436 demand_empty_rest_of_line ();
4438 if (reg == REG_SP || reg == REG_PC)
4440 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4444 if (unwind.fp_reg != REG_SP)
4445 as_bad (_("unexpected .unwind_movsp directive"));
4447 /* Generate opcode to restore the value. */
4449 add_unwind_opcode (op, 1);
4451 /* Record the information for later. */
4452 unwind.fp_reg = reg;
4453 unwind.fp_offset = unwind.frame_size - offset;
4454 unwind.sp_restored = 1;
4457 /* Parse an unwind_pad directive. */
/* Handle .pad: record a stack adjustment of OFFSET bytes (must be a
   multiple of 4).  No opcode is emitted now; pending_offset lets
   adjacent adjustments be merged later.  */
4460 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4464 if (!unwind.proc_start)
4465 as_bad (MISSING_FNSTART);
4467 if (immediate_for_directive (&offset) == FAIL)
4472 as_bad (_("stack increment must be multiple of 4"));
4473 ignore_rest_of_line ();
4477 /* Don't generate any opcodes, just record the details for later. */
4478 unwind.frame_size += offset;
4479 unwind.pending_offset += offset;
4481 demand_empty_rest_of_line ();
4484 /* Parse an unwind_setfp directive. */
/* Handles ".setfp <fp_reg>, <sp_reg>[, #offset]": declares that FP_REG is
   being set up as a frame pointer from SP_REG (+ OFFSET).  SP_REG must be
   sp itself, or the register previously established by .movsp.  Like .pad,
   this only records state; opcodes are deferred so adjustments merge.  */
4487 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4493 if (!unwind.proc_start)
4494 as_bad (MISSING_FNSTART);
4496 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4497 if (skip_past_comma (&input_line_pointer) == FAIL)
4500 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4502 if (fp_reg == FAIL || sp_reg == FAIL)
4504 as_bad (_("expected <reg>, <reg>"));
4505 ignore_rest_of_line ();
4509 /* Optional constant. */
4510 if (skip_past_comma (&input_line_pointer) != FAIL)
4512 if (immediate_for_directive (&offset) == FAIL)
4518 demand_empty_rest_of_line ();
4520 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
/* Fixed: the two adjacent string literals previously concatenated to
   "previousunwind_movsp" (missing space in the diagnostic).  */
4522 as_bad (_("register must be either sp or set by a previous "
4523 "unwind_movsp directive"));
4527 /* Don't generate any opcodes, just record the information for later. */
4528 unwind.fp_reg = fp_reg;
4530 if (sp_reg == REG_SP)
4531 unwind.fp_offset = unwind.frame_size - offset;
4533 unwind.fp_offset -= offset;
4536 /* Parse an unwind_raw directive. */
/* Handles ".unwind_raw <frame-adjust>, <byte>[, <byte>...]": emits raw
   EHABI unwind opcode bytes exactly as written, for cases the structured
   directives cannot express.  Each byte must be a constant in 0..255.  */
4539 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4542 /* This is an arbitrary limit. */
4543 unsigned char op[16];
4546 if (!unwind.proc_start)
4547 as_bad (MISSING_FNSTART);
/* First operand: the frame-size adjustment implied by these opcodes.  */
4550 if (exp.X_op == O_constant
4551 && skip_past_comma (&input_line_pointer) != FAIL)
4553 unwind.frame_size += exp.X_add_number;
/* Force the error path below when the first operand was unusable.  */
4557 exp.X_op = O_illegal;
4559 if (exp.X_op != O_constant)
4561 as_bad (_("expected <offset>, <opcode>"));
4562 ignore_rest_of_line ();
4568 /* Parse the opcode. */
4573 as_bad (_("unwind opcode too long"));
4574 ignore_rest_of_line ();
4576 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4578 as_bad (_("invalid unwind opcode"));
4579 ignore_rest_of_line ();
4582 op[count++] = exp.X_add_number;
4584 /* Parse the next byte. */
4585 if (skip_past_comma (&input_line_pointer) == FAIL)
4591 /* Add the opcode bytes in reverse order. */
4593 add_unwind_opcode (op[count], 1);
4595 demand_empty_rest_of_line ();
4599 /* Parse a .eabi_attribute directive. */
/* Delegates parsing/recording to obj_elf_vendor_attribute, then remembers
   which known tags were set explicitly so later CPU/FPU inference does not
   silently override user-specified attributes.  */
4602 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4604 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4606 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4607 attributes_set_explicitly[tag] = 1;
4610 /* Emit a tls fix for the symbol. */
/* Handles ".tlsdescseq <symbol>": attaches a TLS descriptor-sequence
   relocation at the current output position, without emitting any data.
   The reloc variant depends on whether we are assembling Thumb or ARM.  */
4613 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4617 #ifdef md_flush_pending_output
4618 md_flush_pending_output ();
4621 #ifdef md_cons_align
4625 /* Since we're just labelling the code, there's no need to define a
4628 p = obstack_next_free (&frchain_now->frch_obstack);
4629 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4630 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4631 : BFD_RELOC_ARM_TLS_DESCSEQ);
4633 #endif /* OBJ_ELF */
/* Forward declarations for the directive handlers referenced from
   md_pseudo_table below; their definitions appear later in the file.  */
4635 static void s_arm_arch (int);
4636 static void s_arm_object_arch (int);
4637 static void s_arm_cpu (int);
4638 static void s_arm_fpu (int);
4639 static void s_arm_arch_extension (int);
/* PE-COFF only: handle ".secrel32 expr[, expr...]" by emitting a 32-bit
   section-relative value for each comma-separated symbol expression.  */
4644 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4651 if (exp.X_op == O_symbol)
4652 exp.X_op = O_secrel;
4654 emit_expr (&exp, 4);
4656 while (*input_line_pointer++ == ',');
/* Back up over the character that terminated the list.  */
4658 input_line_pointer--;
4659 demand_empty_rest_of_line ();
4663 /* This table describes all the machine specific pseudo-ops the assembler
4664 has to support. The fields are:
4665 pseudo-op name without dot
4666 function to call to execute this pseudo-op
4667 Integer arg to pass to the function. */
4669 const pseudo_typeS md_pseudo_table[] =
4671 /* Never called because '.req' does not start a line. */
4672 { "req", s_req, 0 },
4673 /* Following two are likewise never called. */
4676 { "unreq", s_unreq, 0 },
4677 { "bss", s_bss, 0 },
4678 { "align", s_align, 0 },
4679 { "arm", s_arm, 0 },
4680 { "thumb", s_thumb, 0 },
4681 { "code", s_code, 0 },
4682 { "force_thumb", s_force_thumb, 0 },
4683 { "thumb_func", s_thumb_func, 0 },
4684 { "thumb_set", s_thumb_set, 0 },
4685 { "even", s_even, 0 },
4686 { "ltorg", s_ltorg, 0 },
4687 { "pool", s_ltorg, 0 },
4688 { "syntax", s_syntax, 0 },
4689 { "cpu", s_arm_cpu, 0 },
4690 { "arch", s_arm_arch, 0 },
4691 { "object_arch", s_arm_object_arch, 0 },
4692 { "fpu", s_arm_fpu, 0 },
4693 { "arch_extension", s_arm_arch_extension, 0 },
4695 { "word", s_arm_elf_cons, 4 },
4696 { "long", s_arm_elf_cons, 4 },
/* ".inst" variants: the integer argument selects the encoding width
   (2 = narrow Thumb, 4 = wide, 0 = infer from current mode).  */
4697 { "inst.n", s_arm_elf_inst, 2 },
4698 { "inst.w", s_arm_elf_inst, 4 },
4699 { "inst", s_arm_elf_inst, 0 },
4700 { "rel31", s_arm_rel31, 0 },
/* EHABI unwind-table directives.  */
4701 { "fnstart", s_arm_unwind_fnstart, 0 },
4702 { "fnend", s_arm_unwind_fnend, 0 },
4703 { "cantunwind", s_arm_unwind_cantunwind, 0 },
4704 { "personality", s_arm_unwind_personality, 0 },
4705 { "personalityindex", s_arm_unwind_personalityindex, 0 },
4706 { "handlerdata", s_arm_unwind_handlerdata, 0 },
4707 { "save", s_arm_unwind_save, 0 },
4708 { "vsave", s_arm_unwind_save, 1 },
4709 { "movsp", s_arm_unwind_movsp, 0 },
4710 { "pad", s_arm_unwind_pad, 0 },
4711 { "setfp", s_arm_unwind_setfp, 0 },
4712 { "unwind_raw", s_arm_unwind_raw, 0 },
4713 { "eabi_attribute", s_arm_eabi_attribute, 0 },
4714 { "tlsdescseq", s_arm_tls_descseq, 0 },
4718 /* These are used for dwarf. */
4722 /* These are used for dwarf2. */
4723 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4724 { "loc", dwarf2_directive_loc, 0 },
4725 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4727 { "extend", float_cons, 'x' },
4728 { "ldouble", float_cons, 'x' },
4729 { "packed", float_cons, 'p' },
4731 {"secrel32", pe_directive_secrel, 0},
4734 /* These are for compatibility with CodeComposer Studio. */
4735 {"ref", s_ccs_ref, 0},
4736 {"def", s_ccs_def, 0},
4737 {"asmfunc", s_ccs_asmfunc, 0},
4738 {"endasmfunc", s_ccs_endasmfunc, 0},
4743 /* Parser functions used exclusively in instruction operands. */
4745 /* Generic immediate-value read function for use in insn parsing.
4746 STR points to the beginning of the immediate (the leading #);
4747 VAL receives the value; if the value is outside [MIN, MAX]
4748 issue an error. PREFIX_OPT is true if the immediate prefix is
4752 parse_immediate (char **str, int *val, int min, int max,
4753 bfd_boolean prefix_opt)
4756 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
/* Only literal constants are acceptable here; symbols/relocs are not.  */
4757 if (exp.X_op != O_constant)
4759 inst.error = _("constant expression required");
4763 if (exp.X_add_number < min || exp.X_add_number > max)
4765 inst.error = _("immediate value out of range");
4769 *val = exp.X_add_number;
4773 /* Less-generic immediate-value read function with the possibility of loading a
4774 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4775 instructions. Puts the result directly in inst.operands[i]. */
/* IN_EXP, when non-NULL, receives the parsed expression (caller-supplied
   scratch); ALLOW_SYMBOL_P permits a bare symbol instead of a constant.
   Low 32 bits land in .imm, high 32 bits (if any) in .reg with
   .regisimm = 1 marking the pair as a 64-bit immediate.  */
4778 parse_big_immediate (char **str, int i, expressionS *in_exp,
4779 bfd_boolean allow_symbol_p)
4782 expressionS *exp_p = in_exp ? in_exp : &exp;
4785 my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);
4787 if (exp_p->X_op == O_constant)
4789 inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
4790 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4791 O_constant. We have to be careful not to break compilation for
4792 32-bit X_add_number, though. */
4793 if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4795 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4796 inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
4798 inst.operands[i].regisimm = 1;
4801 else if (exp_p->X_op == O_big
4802 && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
4804 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4806 /* Bignums have their least significant bits in
4807 generic_bignum[0]. Make sure we put 32 bits in imm and
4808 32 bits in reg, in a (hopefully) portable way. */
4809 gas_assert (parts != 0);
4811 /* Make sure that the number is not too big.
4812 PR 11972: Bignums can now be sign-extended to the
4813 size of a .octa so check that the out of range bits
4814 are all zero or all one. */
4815 if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
4817 LITTLENUM_TYPE m = -1;
/* All littlenums beyond bit 64 must equal the sign-extension value.  */
4819 if (generic_bignum[parts * 2] != 0
4820 && generic_bignum[parts * 2] != m)
4823 for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
4824 if (generic_bignum[j] != generic_bignum[j-1])
/* Assemble the low word from the first PARTS littlenums ...  */
4828 inst.operands[i].imm = 0;
4829 for (j = 0; j < parts; j++, idx++)
4830 inst.operands[i].imm |= generic_bignum[idx]
4831 << (LITTLENUM_NUMBER_OF_BITS * j);
/* ... and the high word from the next PARTS littlenums.  */
4832 inst.operands[i].reg = 0;
4833 for (j = 0; j < parts; j++, idx++)
4834 inst.operands[i].reg |= generic_bignum[idx]
4835 << (LITTLENUM_NUMBER_OF_BITS * j);
4836 inst.operands[i].regisimm = 1;
4838 else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
4846 /* Returns the pseudo-register number of an FPA immediate constant,
4847 or FAIL if there isn't a valid constant here. */
4850 parse_fpa_immediate (char ** str)
4852 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4858 /* First try and match exact strings, this is to guarantee
4859 that some formats will work even for cross assembly. */
4861 for (i = 0; fp_const[i]; i++)
4863 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4867 *str += strlen (fp_const[i]);
4868 if (is_end_of_line[(unsigned char) **str])
4874 /* Just because we didn't get a match doesn't mean that the constant
4875 isn't valid, just that it is in a format that we don't
4876 automatically recognize. Try parsing it with the standard
4877 expression routines. */
4879 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4881 /* Look for a raw floating point number. */
4882 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4883 && is_end_of_line[(unsigned char) *save_in])
/* Compare the parsed value against each of the representable FPA
   constants; the matching index is the pseudo-register number.  */
4885 for (i = 0; i < NUM_FLOAT_VALS; i++)
4887 for (j = 0; j < MAX_LITTLENUMS; j++)
4889 if (words[j] != fp_values[i][j])
4893 if (j == MAX_LITTLENUMS)
4901 /* Try and parse a more complex expression, this will probably fail
4902 unless the code uses a floating point prefix (eg "0f"). */
4903 save_in = input_line_pointer;
4904 input_line_pointer = *str;
4905 if (expression (&exp) == absolute_section
4906 && exp.X_op == O_big
4907 && exp.X_add_number < 0)
4909 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4911 if (gen_to_words (words, 5, (long) 15) == 0)
4913 for (i = 0; i < NUM_FLOAT_VALS; i++)
4915 for (j = 0; j < MAX_LITTLENUMS; j++)
4917 if (words[j] != fp_values[i][j])
4921 if (j == MAX_LITTLENUMS)
4923 *str = input_line_pointer;
4924 input_line_pointer = save_in;
/* Failure: restore input_line_pointer before reporting.  */
4931 *str = input_line_pointer;
4932 input_line_pointer = save_in;
4933 inst.error = _("invalid FPA immediate expression");
4937 /* Returns 1 if a number has "quarter-precision" float format
4938 0baBbbbbbc defgh000 00000000 00000000. */
/* IMM is an IEEE single-precision bit pattern.  The low 19 bits must be
   zero, and bits 25-30 must be either all-ones or all-zeros matching the
   complement of bit 30 (the "Bbbbbb" exponent run of the VFP/Neon
   8-bit-immediate encoding).  */
4941 is_quarter_float (unsigned imm)
4943 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4944 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4947 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4948 0baBbbbbbc defgh000 00000000 00000000.
4949 The zero and minus-zero cases need special handling, since they can't be
4950 encoded in the "quarter-precision" float format, but can nonetheless be
4951 loaded as integer constants. */
4954 parse_qfloat_immediate (char **ccp, int *immed)
4958 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4959 int found_fpchar = 0;
4961 skip_past_char (&str, '#');
4963 /* We must not accidentally parse an integer as a floating-point number. Make
4964 sure that the value we parse is not an integer by checking for special
4965 characters '.' or 'e'.
4966 FIXME: This is a horrible hack, but doing better is tricky because type
4967 information isn't in a very usable state at parse time. */
4969 skip_whitespace (fpnum);
/* A hex prefix can never begin a float literal here.  */
4971 if (strncmp (fpnum, "0x", 2) == 0)
4975 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4976 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4986 if ((str = atof_ieee (str, 's', words)) != NULL)
4988 unsigned fpword = 0;
4991 /* Our FP word must be 32 bits (single-precision FP). */
/* Fold the littlenums into one 32-bit word, most significant first.  */
4992 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4994 fpword <<= LITTLENUM_NUMBER_OF_BITS;
/* Accept encodable quarter-floats, plus +0.0/-0.0 (see comment above).  */
4998 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5011 /* Shift operands. */
/* The five ARM shift kinds; RRX is "rotate right with extend" and takes
   no amount.  */
5014 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
/* Maps a mnemonic shift name to its kind, for the arm_shift_hsh table.  */
5017 struct asm_shift_name
5020 enum shift_kind kind;
5023 /* Third argument to parse_shift. */
5024 enum parse_shift_mode
5026 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
5027 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
5028 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
5029 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
5030 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
5033 /* Parse a <shift> specifier on an ARM data processing instruction.
5034 This has three forms:
5036 (LSL|LSR|ASL|ASR|ROR) Rs
5037 (LSL|LSR|ASL|ASR|ROR) #imm
5040 Note that ASL is assimilated to LSL in the instruction encoding, and
5041 RRX to ROR #0 (which cannot be written as such). */
/* On success the shift kind/amount is recorded in inst.operands[i]
   (.shift_kind, .shifted, and either .imm/.immisreg for a register
   amount or inst.reloc.exp for an immediate).  Returns FAIL with
   inst.error set on any syntax or mode violation.  */
5044 parse_shift (char **str, int i, enum parse_shift_mode mode)
5046 const struct asm_shift_name *shift_name;
5047 enum shift_kind shift;
/* Scan the alphabetic shift mnemonic.  */
5052 for (p = *str; ISALPHA (*p); p++)
5057 inst.error = _("shift expression expected");
5061 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5064 if (shift_name == NULL)
5066 inst.error = _("shift expression expected");
5070 shift = shift_name->kind;
/* Enforce the per-instruction restriction on permitted shift kinds.  */
5074 case NO_SHIFT_RESTRICT:
5075 case SHIFT_IMMEDIATE: break;
5077 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5078 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5080 inst.error = _("'LSL' or 'ASR' required");
5085 case SHIFT_LSL_IMMEDIATE:
5086 if (shift != SHIFT_LSL)
5088 inst.error = _("'LSL' required");
5093 case SHIFT_ASR_IMMEDIATE:
5094 if (shift != SHIFT_ASR)
5096 inst.error = _("'ASR' required");
/* RRX takes no amount; everything else needs a register or #imm.  */
5104 if (shift != SHIFT_RRX)
5106 /* Whitespace can appear here if the next thing is a bare digit. */
5107 skip_whitespace (p);
5109 if (mode == NO_SHIFT_RESTRICT
5110 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5112 inst.operands[i].imm = reg;
5113 inst.operands[i].immisreg = 1;
5115 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5118 inst.operands[i].shift_kind = shift;
5119 inst.operands[i].shifted = 1;
5124 /* Parse a <shifter_operand> for an ARM data processing instruction:
5127 #<immediate>, <rotate>
5131 where <shift> is defined by parse_shift above, and <rotate> is a
5132 multiple of 2 between 0 and 30. Validation of immediate operands
5133 is deferred to md_apply_fix. */
5136 parse_shifter_operand (char **str, int i)
/* Register form: Rm optionally followed by ", <shift>".  */
5141 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5143 inst.operands[i].reg = value;
5144 inst.operands[i].isreg = 1;
5146 /* parse_shift will override this if appropriate */
5147 inst.reloc.exp.X_op = O_constant;
5148 inst.reloc.exp.X_add_number = 0;
5150 if (skip_past_comma (str) == FAIL)
5153 /* Shift operation on register. */
5154 return parse_shift (str, i, NO_SHIFT_RESTRICT);
/* Immediate form: #imm, optionally with an explicit rotation.  */
5157 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5160 if (skip_past_comma (str) == SUCCESS)
5162 /* #x, y -- ie explicit rotation by Y. */
5163 if (my_get_expression (&exp, str, GE_NO_PREFIX))
5166 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5168 inst.error = _("constant expression expected")
5172 value = exp.X_add_number;
5173 if (value < 0 || value > 30 || value % 2 != 0)
5175 inst.error = _("invalid rotation");
5178 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5180 inst.error = _("invalid constant");
5184 /* Encode as specified. */
/* Rotation goes in bits 7..11 of the immediate field (value << 7).  */
5185 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
/* No explicit rotation: leave encoding to the fixup machinery.  */
5189 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5190 inst.reloc.pc_rel = 0;
5194 /* Group relocation information. Each entry in the table contains the
5195 textual name of the relocation as may appear in assembler source
5196 and must end with a colon.
5197 Along with this textual name are the relocation codes to be used if
5198 the corresponding instruction is an ALU instruction (ADD or SUB only),
5199 an LDR, an LDRS, or an LDC. */
5201 struct group_reloc_table_entry
5212 /* Varieties of non-ALU group relocation. */
5219 static struct group_reloc_table_entry group_reloc_table[] =
5220 { /* Program counter relative: */
5222 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
5227 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
5228 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
5229 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
5230 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
5232 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
5237 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
5238 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
5239 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
5240 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
5242 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
5243 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
5244 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
5245 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
5246 /* Section base relative */
5248 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
5253 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
5254 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
5255 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
5256 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
5258 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
5263 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
5264 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
5265 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
5266 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
5268 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
5269 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
5270 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
5271 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
5273 /* Given the address of a pointer pointing to the textual name of a group
5274 relocation as may appear in assembler source, attempt to find its details
5275 in group_reloc_table. The pointer will be updated to the character after
5276 the trailing colon. On failure, FAIL will be returned; SUCCESS
5277 otherwise. On success, *entry will be updated to point at the relevant
5278 group_reloc_table entry. */
5281 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5284 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5286 int length = strlen (group_reloc_table[i].name);
/* Case-insensitive name match, and the name must be terminated by ':'.  */
5288 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5289 && (*str)[length] == ':')
5291 *out = &group_reloc_table[i];
/* Advance past the name and its trailing colon.  */
5292 *str += (length + 1);
5300 /* Parse a <shifter_operand> for an ARM data processing instruction
5301 (as for parse_shifter_operand) where group relocations are allowed:
5304 #<immediate>, <rotate>
5305 #:<group_reloc>:<expression>
5309 where <group_reloc> is one of the strings defined in group_reloc_table.
5310 The hashes are optional.
5312 Everything else is as for parse_shifter_operand. */
5314 static parse_operand_result
5315 parse_shifter_operand_group_reloc (char **str, int i)
5317 /* Determine if we have the sequence of characters #: or just :
5318 coming next. If we do, then we check for a group relocation.
5319 If we don't, punt the whole lot to parse_shifter_operand. */
5321 if (((*str)[0] == '#' && (*str)[1] == ':')
5322 || (*str)[0] == ':')
5324 struct group_reloc_table_entry *entry;
/* Skip the optional '#' so *str points at the ':'.  */
5326 if ((*str)[0] == '#')
5331 /* Try to parse a group relocation. Anything else is an error. */
5332 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5334 inst.error = _("unknown group relocation");
5335 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5338 /* We now have the group relocation table entry corresponding to
5339 the name in the assembler source. Next, we parse the expression. */
5340 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5341 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5343 /* Record the relocation type (always the ALU variant here). */
5344 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5345 gas_assert (inst.reloc.type != 0);
5347 return PARSE_OPERAND_SUCCESS;
/* Not a group relocation: fall back to the plain parser.  */
5350 return parse_shifter_operand (str, i) == SUCCESS
5351 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5353 /* Never reached. */
5356 /* Parse a Neon alignment expression. Information is written to
5357 inst.operands[i]. We assume the initial ':' has been skipped.
5359 align .imm = align << 8, .immisalign=1, .preind=0 */
5360 static parse_operand_result
5361 parse_neon_alignment (char **str, int i)
5366 my_get_expression (&exp, &p, GE_NO_PREFIX);
5368 if (exp.X_op != O_constant)
5370 inst.error = _("alignment must be constant");
5371 return PARSE_OPERAND_FAIL;
/* Alignment is stored pre-shifted so a register number can later be
   OR'd into the low bits (see parse_address_main).  */
5374 inst.operands[i].imm = exp.X_add_number << 8;
5375 inst.operands[i].immisalign = 1;
5376 /* Alignments are not pre-indexes. */
5377 inst.operands[i].preind = 0;
5380 return PARSE_OPERAND_SUCCESS;
5383 /* Parse all forms of an ARM address expression. Information is written
5384 to inst.operands[i] and/or inst.reloc.
5386 Preindexed addressing (.preind=1):
5388 [Rn, #offset] .reg=Rn .reloc.exp=offset
5389 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5390 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5391 .shift_kind=shift .reloc.exp=shift_imm
5393 These three may have a trailing ! which causes .writeback to be set also.
5395 Postindexed addressing (.postind=1, .writeback=1):
5397 [Rn], #offset .reg=Rn .reloc.exp=offset
5398 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5399 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5400 .shift_kind=shift .reloc.exp=shift_imm
5402 Unindexed addressing (.preind=0, .postind=0):
5404 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5408 [Rn]{!} shorthand for [Rn,#0]{!}
5409 =immediate .isreg=0 .reloc.exp=immediate
5410 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5412 It is the caller's responsibility to check for addressing modes not
5413 supported by the instruction, and to set inst.reloc.type. */
5415 static parse_operand_result
5416 parse_address_main (char **str, int i, int group_relocations,
5417 group_reloc_type group_type)
/* No '[' means either "=imm" (literal-pool load) or a bare label.  */
5422 if (skip_past_char (&p, '[') == FAIL)
5424 if (skip_past_char (&p, '=') == FAIL)
5426 /* Bare address - translate to PC-relative offset. */
5427 inst.reloc.pc_rel = 1;
5428 inst.operands[i].reg = REG_PC;
5429 inst.operands[i].isreg = 1;
5430 inst.operands[i].preind = 1;
5432 if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
5433 return PARSE_OPERAND_FAIL;
5435 else if (parse_big_immediate (&p, i, &inst.reloc.exp,
5436 /*allow_symbol_p=*/TRUE))
5437 return PARSE_OPERAND_FAIL;
5440 return PARSE_OPERAND_SUCCESS;
5443 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5444 skip_whitespace (p);
5446 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5448 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5449 return PARSE_OPERAND_FAIL;
5451 inst.operands[i].reg = reg;
5452 inst.operands[i].isreg = 1;
/* "[Rn," : pre-indexed forms.  */
5454 if (skip_past_comma (&p) == SUCCESS)
5456 inst.operands[i].preind = 1;
5459 else if (*p == '-') p++, inst.operands[i].negative = 1;
5461 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5463 inst.operands[i].imm = reg;
5464 inst.operands[i].immisreg = 1;
5466 if (skip_past_comma (&p) == SUCCESS)
5467 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5468 return PARSE_OPERAND_FAIL;
5470 else if (skip_past_char (&p, ':') == SUCCESS)
5472 /* FIXME: '@' should be used here, but it's filtered out by generic
5473 code before we get to see it here. This may be subject to
5475 parse_operand_result result = parse_neon_alignment (&p, i);
5477 if (result != PARSE_OPERAND_SUCCESS)
/* Immediate offset: a leading '-' is folded into the expression.  */
5482 if (inst.operands[i].negative)
5484 inst.operands[i].negative = 0;
5488 if (group_relocations
5489 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5491 struct group_reloc_table_entry *entry;
5493 /* Skip over the #: or : sequence. */
5499 /* Try to parse a group relocation. Anything else is an
5501 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5503 inst.error = _("unknown group relocation");
5504 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5507 /* We now have the group relocation table entry corresponding to
5508 the name in the assembler source. Next, we parse the
5510 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5511 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5513 /* Record the relocation type. */
/* Pick the variant matching the instruction class passed in
   GROUP_TYPE (LDR / LDRS / LDC).  */
5517 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5521 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5525 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5532 if (inst.reloc.type == 0)
5534 inst.error = _("this group relocation is not allowed on this instruction");
5535 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5541 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5542 return PARSE_OPERAND_FAIL;
5543 /* If the offset is 0, find out if it's a +0 or -0. */
5544 if (inst.reloc.exp.X_op == O_constant
5545 && inst.reloc.exp.X_add_number == 0)
5547 skip_whitespace (q);
5551 skip_whitespace (q);
5554 inst.operands[i].negative = 1;
/* "[Rn:" : Neon alignment directly after the base register.  */
5559 else if (skip_past_char (&p, ':') == SUCCESS)
5561 /* FIXME: '@' should be used here, but it's filtered out by generic code
5562 before we get to see it here. This may be subject to change. */
5563 parse_operand_result result = parse_neon_alignment (&p, i);
5565 if (result != PARSE_OPERAND_SUCCESS)
5569 if (skip_past_char (&p, ']') == FAIL)
5571 inst.error = _("']' expected");
5572 return PARSE_OPERAND_FAIL;
5575 if (skip_past_char (&p, '!') == SUCCESS)
5576 inst.operands[i].writeback = 1;
/* "[Rn]," : post-indexed or unindexed forms.  */
5578 else if (skip_past_comma (&p) == SUCCESS)
5580 if (skip_past_char (&p, '{') == SUCCESS)
5582 /* [Rn], {expr} - unindexed, with option */
5583 if (parse_immediate (&p, &inst.operands[i].imm,
5584 0, 255, TRUE) == FAIL)
5585 return PARSE_OPERAND_FAIL;
5587 if (skip_past_char (&p, '}') == FAIL)
5589 inst.error = _("'}' expected at end of 'option' field");
5590 return PARSE_OPERAND_FAIL;
5592 if (inst.operands[i].preind)
5594 inst.error = _("cannot combine index with option");
5595 return PARSE_OPERAND_FAIL;
5598 return PARSE_OPERAND_SUCCESS;
5602 inst.operands[i].postind = 1;
5603 inst.operands[i].writeback = 1;
5605 if (inst.operands[i].preind)
5607 inst.error = _("cannot combine pre- and post-indexing");
5608 return PARSE_OPERAND_FAIL;
5612 else if (*p == '-') p++, inst.operands[i].negative = 1;
5614 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5616 /* We might be using the immediate for alignment already. If we
5617 are, OR the register number into the low-order bits. */
5618 if (inst.operands[i].immisalign)
5619 inst.operands[i].imm |= reg;
5621 inst.operands[i].imm = reg;
5622 inst.operands[i].immisreg = 1;
5624 if (skip_past_comma (&p) == SUCCESS)
5625 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5626 return PARSE_OPERAND_FAIL;
/* Post-indexed immediate offset, with the same +0/-0 handling as
   the pre-indexed case above.  */
5631 if (inst.operands[i].negative)
5633 inst.operands[i].negative = 0;
5636 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5637 return PARSE_OPERAND_FAIL;
5638 /* If the offset is 0, find out if it's a +0 or -0. */
5639 if (inst.reloc.exp.X_op == O_constant
5640 && inst.reloc.exp.X_add_number == 0)
5642 skip_whitespace (q);
5646 skip_whitespace (q);
5649 inst.operands[i].negative = 1;
5655 /* If at this point neither .preind nor .postind is set, we have a
5656 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5657 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5659 inst.operands[i].preind = 1;
5660 inst.reloc.exp.X_op = O_constant;
5661 inst.reloc.exp.X_add_number = 0;
5664 return PARSE_OPERAND_SUCCESS;
/* Convenience wrapper: parse an address with group relocations disabled,
   collapsing the tri-state result to SUCCESS/FAIL.  */
5668 parse_address (char **str, int i)
5670 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
/* As parse_address, but group relocations of kind TYPE are permitted and
   the full parse_operand_result is propagated to the caller.  */
5674 static parse_operand_result
5675 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5677 return parse_address_main (str, i, 1, type);
5680 /* Parse an operand for a MOVW or MOVT instruction. */
/* Recognises an optional ":lower16:"/":upper16:" relocation prefix, then
   an expression.  Without a prefix the expression must be a constant in
   0..0xffff.  Result goes into inst.reloc.  */
5682 parse_half (char **str)
5687 skip_past_char (&p, '#');
5688 if (strncasecmp (p, ":lower16:", 9) == 0)
5689 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5690 else if (strncasecmp (p, ":upper16:", 9) == 0)
5691 inst.reloc.type = BFD_RELOC_ARM_MOVT;
/* Skip over the recognised prefix before parsing the expression.  */
5693 if (inst.reloc.type != BFD_RELOC_UNUSED)
5696 skip_whitespace (p);
5699 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
/* No :lower16:/:upper16: prefix - must be a bare 16-bit constant.  */
5702 if (inst.reloc.type == BFD_RELOC_UNUSED)
5704 if (inst.reloc.exp.X_op != O_constant)
5706 inst.error = _("constant expression expected");
5709 if (inst.reloc.exp.X_add_number < 0
5710 || inst.reloc.exp.X_add_number > 0xffff)
5712 inst.error = _("immediate value out of range");
5720 /* Miscellaneous. */
5722 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5723 or a bitmask suitable to be or-ed into the ARM msr instruction. */
/* LHS is TRUE when parsing the destination of MSR (a write), which
   affects how unadorned APSR and M-profile register names are masked.  */
5725 parse_psr (char **str, bfd_boolean lhs)
5728 unsigned long psr_field;
5729 const struct asm_psr *psr;
5731 bfd_boolean is_apsr = FALSE;
5732 bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5734 /* PR gas/12698: If the user has specified -march=all then m_profile will
5735 be TRUE, but we want to ignore it in this case as we are building for any
5736 CPU type, including non-m variants. */
5737 if (selected_cpu.core == arm_arch_any.core)
5740 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5741 feature for ease of use and backwards compatibility. */
5743 if (strncasecmp (p, "SPSR", 4) == 0)
/* SPSR does not exist on M-profile.  */
5746 goto unsupported_psr;
5748 psr_field = SPSR_BIT;
5750 else if (strncasecmp (p, "CPSR", 4) == 0)
5753 goto unsupported_psr;
5757 else if (strncasecmp (p, "APSR", 4) == 0)
5759 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5760 and ARMv7-R architecture CPUs. */
/* M-profile special-register path: scan the full register name.  */
5769 while (ISALNUM (*p) || *p == '_');
5771 if (strncasecmp (start, "iapsr", 5) == 0
5772 || strncasecmp (start, "eapsr", 5) == 0
5773 || strncasecmp (start, "xpsr", 4) == 0
5774 || strncasecmp (start, "psr", 3) == 0)
5775 p = start + strcspn (start, "rR") + 1;
5777 psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5783 /* If APSR is being written, a bitfield may be specified. Note that
5784 APSR itself is handled above. */
5785 if (psr->field <= 3)
5787 psr_field = psr->field;
5793 /* M-profile MSR instructions have the mask field set to "10", except
5794 *PSR variants which modify APSR, which may use a different mask (and
5795 have been handled already). Do that by setting the PSR_f field
5797 return psr->field | (lhs ? PSR_f : 0);
5800 goto unsupported_psr;
5806 /* A suffix follows. */
/* Collect the suffix characters after the '_'.  */
5812 while (ISALNUM (*p) || *p == '_');
5816 /* APSR uses a notation for bits, rather than fields. */
5817 unsigned int nzcvq_bits = 0;
5818 unsigned int g_bit = 0;
/* Each letter sets its bit; bit 0x20 flags a duplicated letter.  */
5821 for (bit = start; bit != p; bit++)
5823 switch (TOLOWER (*bit))
5826 nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5830 nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5834 nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5838 nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5842 nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5846 g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5850 inst.error = _("unexpected bit specified after APSR");
/* Only the full "nzcvq" set (optionally with "g") is encodable.  */
5855 if (nzcvq_bits == 0x1f)
/* The "g" bit needs the DSP extension.  */
5860 if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5862 inst.error = _("selected processor does not "
5863 "support DSP extension");
5870 if ((nzcvq_bits & 0x20) != 0
5871 || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5872 || (g_bit & 0x2) != 0)
5874 inst.error = _("bad bitmask specified after APSR");
/* Non-APSR suffix: look it up in the PSR-fields hash table.  */
5880 psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5885 psr_field |= psr->field;
5891 goto error; /* Garbage after "[CS]PSR". */
5893 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5894 is deprecated, but allow it anyway. */
5898 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5901 else if (!m_profile)
5902 /* These bits are never right for M-profile devices: don't set them
5903 (only code paths which read/write APSR reach here). */
5904 psr_field |= (PSR_c | PSR_f);
5910 inst.error = _("selected processor does not support requested special "
5911 "purpose register");
5915 inst.error = _("flag for {c}psr instruction expected");
5919 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5920 value suitable for splatting into the AIF field of the instruction. */
5923 parse_cps_flags (char **str)
/* Accept any combination of the letters a/i/f (case-insensitive);
   at least one must be present.  */
5932 case '\0': case ',':
5935 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5936 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5937 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5940 inst.error = _("unrecognized CPS flag");
5945 if (saw_a_flag == 0)
5947 inst.error = _("missing CPS flags");
5955 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5956 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5959 parse_endian_specifier (char **str)
5964 if (strncasecmp (s, "BE", 2))
5966 else if (strncasecmp (s, "LE", 2))
5970 inst.error = _("valid endian specifiers are be or le");
5974 if (ISALNUM (s[2]) || s[2] == '_')
5976 inst.error = _("valid endian specifiers are be or le");
5981 return little_endian;
5984 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5985 value suitable for poking into the rotate field of an sxt or sxta
5986 instruction, or FAIL on error. */
5989 parse_ror (char **str)
5994 if (strncasecmp (s, "ROR", 3) == 0)
5998 inst.error = _("missing rotation field after comma");
6002 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6007 case 0: *str = s; return 0x0;
6008 case 8: *str = s; return 0x1;
6009 case 16: *str = s; return 0x2;
6010 case 24: *str = s; return 0x3;
6013 inst.error = _("rotation can only be 0, 8, 16, or 24");
6018 /* Parse a conditional code (from conds[] below). The value returned is in the
6019 range 0 .. 14, or FAIL. */
6021 parse_cond (char **str)
6024 const struct asm_cond *c;
6026 /* Condition codes are always 2 characters, so matching up to
6027 3 characters is sufficient. */
6032 while (ISALPHA (*q) && n < 3)
6034 cond[n] = TOLOWER (*q);
6039 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6042 inst.error = _("condition required");
6050 /* If the given feature available in the selected CPU, mark it as used.
6051 Returns TRUE iff feature is available. */
6053 mark_feature_used (const arm_feature_set *feature)
6055 /* Ensure the option is valid on the current architecture. */
6056 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6059 /* Add the appropriate architecture feature for the barrier option used.
6062 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6064 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6069 /* Parse an option for a barrier instruction. Returns the encoding for the
6072 parse_barrier (char **str)
6075 const struct asm_barrier_opt *o;
6078 while (ISALPHA (*q))
6081 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6086 if (!mark_feature_used (&o->arch))
6093 /* Parse the operands of a table branch instruction. Similar to a memory
6096 parse_tb (char **str)
6101 if (skip_past_char (&p, '[') == FAIL)
6103 inst.error = _("'[' expected");
6107 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6109 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6112 inst.operands[0].reg = reg;
6114 if (skip_past_comma (&p) == FAIL)
6116 inst.error = _("',' expected");
6120 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6122 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6125 inst.operands[0].imm = reg;
6127 if (skip_past_comma (&p) == SUCCESS)
6129 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6131 if (inst.reloc.exp.X_add_number != 1)
6133 inst.error = _("invalid shift");
6136 inst.operands[0].shifted = 1;
6139 if (skip_past_char (&p, ']') == FAIL)
6141 inst.error = _("']' expected");
6148 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6149 information on the types the operands can take and how they are encoded.
6150 Up to four operands may be read; this function handles setting the
6151 ".present" field for each read operand itself.
6152 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6153 else returns FAIL. */
6156 parse_neon_mov (char **str, int *which_operand)
6158 int i = *which_operand, val;
6159 enum arm_reg_type rtype;
6161 struct neon_type_el optype;
6163 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6165 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6166 inst.operands[i].reg = val;
6167 inst.operands[i].isscalar = 1;
6168 inst.operands[i].vectype = optype;
6169 inst.operands[i++].present = 1;
6171 if (skip_past_comma (&ptr) == FAIL)
6174 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6177 inst.operands[i].reg = val;
6178 inst.operands[i].isreg = 1;
6179 inst.operands[i].present = 1;
6181 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
6184 /* Cases 0, 1, 2, 3, 5 (D only). */
6185 if (skip_past_comma (&ptr) == FAIL)
6188 inst.operands[i].reg = val;
6189 inst.operands[i].isreg = 1;
6190 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6191 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6192 inst.operands[i].isvec = 1;
6193 inst.operands[i].vectype = optype;
6194 inst.operands[i++].present = 1;
6196 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6198 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6199 Case 13: VMOV <Sd>, <Rm> */
6200 inst.operands[i].reg = val;
6201 inst.operands[i].isreg = 1;
6202 inst.operands[i].present = 1;
6204 if (rtype == REG_TYPE_NQ)
6206 first_error (_("can't use Neon quad register here"));
6209 else if (rtype != REG_TYPE_VFS)
6212 if (skip_past_comma (&ptr) == FAIL)
6214 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6216 inst.operands[i].reg = val;
6217 inst.operands[i].isreg = 1;
6218 inst.operands[i].present = 1;
6221 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
6224 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6225 Case 1: VMOV<c><q> <Dd>, <Dm>
6226 Case 8: VMOV.F32 <Sd>, <Sm>
6227 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6229 inst.operands[i].reg = val;
6230 inst.operands[i].isreg = 1;
6231 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6232 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6233 inst.operands[i].isvec = 1;
6234 inst.operands[i].vectype = optype;
6235 inst.operands[i].present = 1;
6237 if (skip_past_comma (&ptr) == SUCCESS)
6242 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6245 inst.operands[i].reg = val;
6246 inst.operands[i].isreg = 1;
6247 inst.operands[i++].present = 1;
6249 if (skip_past_comma (&ptr) == FAIL)
6252 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6255 inst.operands[i].reg = val;
6256 inst.operands[i].isreg = 1;
6257 inst.operands[i].present = 1;
6260 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
6261 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6262 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6263 Case 10: VMOV.F32 <Sd>, #<imm>
6264 Case 11: VMOV.F64 <Dd>, #<imm> */
6265 inst.operands[i].immisfloat = 1;
6266 else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
6268 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6269 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6273 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6277 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6280 inst.operands[i].reg = val;
6281 inst.operands[i].isreg = 1;
6282 inst.operands[i++].present = 1;
6284 if (skip_past_comma (&ptr) == FAIL)
6287 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6289 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6290 inst.operands[i].reg = val;
6291 inst.operands[i].isscalar = 1;
6292 inst.operands[i].present = 1;
6293 inst.operands[i].vectype = optype;
6295 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6297 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6298 inst.operands[i].reg = val;
6299 inst.operands[i].isreg = 1;
6300 inst.operands[i++].present = 1;
6302 if (skip_past_comma (&ptr) == FAIL)
6305 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6308 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6312 inst.operands[i].reg = val;
6313 inst.operands[i].isreg = 1;
6314 inst.operands[i].isvec = 1;
6315 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6316 inst.operands[i].vectype = optype;
6317 inst.operands[i].present = 1;
6319 if (rtype == REG_TYPE_VFS)
6323 if (skip_past_comma (&ptr) == FAIL)
6325 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6328 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6331 inst.operands[i].reg = val;
6332 inst.operands[i].isreg = 1;
6333 inst.operands[i].isvec = 1;
6334 inst.operands[i].issingle = 1;
6335 inst.operands[i].vectype = optype;
6336 inst.operands[i].present = 1;
6339 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6343 inst.operands[i].reg = val;
6344 inst.operands[i].isreg = 1;
6345 inst.operands[i].isvec = 1;
6346 inst.operands[i].issingle = 1;
6347 inst.operands[i].vectype = optype;
6348 inst.operands[i].present = 1;
6353 first_error (_("parse error"));
6357 /* Successfully parsed the operands. Update args. */
6363 first_error (_("expected comma"));
6367 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6371 /* Use this macro when the operand constraints are different
6372 for ARM and THUMB (e.g. ldrd). */
6373 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6374 ((arm_operand) | ((thumb_operand) << 16))
6376 /* Matcher codes for parse_operands. */
6377 enum operand_parse_code
6379 OP_stop, /* end of line */
6381 OP_RR, /* ARM register */
6382 OP_RRnpc, /* ARM register, not r15 */
6383 OP_RRnpcsp, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6384 OP_RRnpcb, /* ARM register, not r15, in square brackets */
6385 OP_RRnpctw, /* ARM register, not r15 in Thumb-state or with writeback,
6386 optional trailing ! */
6387 OP_RRw, /* ARM register, not r15, optional trailing ! */
6388 OP_RCP, /* Coprocessor number */
6389 OP_RCN, /* Coprocessor register */
6390 OP_RF, /* FPA register */
6391 OP_RVS, /* VFP single precision register */
6392 OP_RVD, /* VFP double precision register (0..15) */
6393 OP_RND, /* Neon double precision register (0..31) */
6394 OP_RNQ, /* Neon quad precision register */
6395 OP_RVSD, /* VFP single or double precision register */
6396 OP_RNDQ, /* Neon double or quad precision register */
6397 OP_RNSDQ, /* Neon single, double or quad precision register */
6398 OP_RNSC, /* Neon scalar D[X] */
6399 OP_RVC, /* VFP control register */
6400 OP_RMF, /* Maverick F register */
6401 OP_RMD, /* Maverick D register */
6402 OP_RMFX, /* Maverick FX register */
6403 OP_RMDX, /* Maverick DX register */
6404 OP_RMAX, /* Maverick AX register */
6405 OP_RMDS, /* Maverick DSPSC register */
6406 OP_RIWR, /* iWMMXt wR register */
6407 OP_RIWC, /* iWMMXt wC register */
6408 OP_RIWG, /* iWMMXt wCG register */
6409 OP_RXA, /* XScale accumulator register */
6411 OP_REGLST, /* ARM register list */
6412 OP_VRSLST, /* VFP single-precision register list */
6413 OP_VRDLST, /* VFP double-precision register list */
6414 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
6415 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
6416 OP_NSTRLST, /* Neon element/structure list */
6418 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
6419 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
6420 OP_RR_RNSC, /* ARM reg or Neon scalar. */
6421 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
6422 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
6423 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
6424 OP_VMOV, /* Neon VMOV operands. */
6425 OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6426 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
6427 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6429 OP_I0, /* immediate zero */
6430 OP_I7, /* immediate value 0 .. 7 */
6431 OP_I15, /* 0 .. 15 */
6432 OP_I16, /* 1 .. 16 */
6433 OP_I16z, /* 0 .. 16 */
6434 OP_I31, /* 0 .. 31 */
6435 OP_I31w, /* 0 .. 31, optional trailing ! */
6436 OP_I32, /* 1 .. 32 */
6437 OP_I32z, /* 0 .. 32 */
6438 OP_I63, /* 0 .. 63 */
6439 OP_I63s, /* -64 .. 63 */
6440 OP_I64, /* 1 .. 64 */
6441 OP_I64z, /* 0 .. 64 */
6442 OP_I255, /* 0 .. 255 */
6444 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
6445 OP_I7b, /* 0 .. 7 */
6446 OP_I15b, /* 0 .. 15 */
6447 OP_I31b, /* 0 .. 31 */
6449 OP_SH, /* shifter operand */
6450 OP_SHG, /* shifter operand with possible group relocation */
6451 OP_ADDR, /* Memory address expression (any mode) */
6452 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
6453 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6454 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
6455 OP_EXP, /* arbitrary expression */
6456 OP_EXPi, /* same, with optional immediate prefix */
6457 OP_EXPr, /* same, with optional relocation suffix */
6458 OP_HALF, /* 0 .. 65535 or low/high reloc. */
6460 OP_CPSF, /* CPS flags */
6461 OP_ENDI, /* Endianness specifier */
6462 OP_wPSR, /* CPSR/SPSR/APSR mask for msr (writing). */
6463 OP_rPSR, /* CPSR/SPSR/APSR mask for msr (reading). */
6464 OP_COND, /* conditional code */
6465 OP_TB, /* Table branch. */
6467 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
6469 OP_RRnpc_I0, /* ARM register or literal 0 */
6470 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
6471 OP_RR_EXi, /* ARM register or expression with imm prefix */
6472 OP_RF_IF, /* FPA register or immediate */
6473 OP_RIWR_RIWC, /* iWMMXt R or C reg */
6474 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6476 /* Optional operands. */
6477 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
6478 OP_oI31b, /* 0 .. 31 */
6479 OP_oI32b, /* 1 .. 32 */
6480 OP_oI32z, /* 0 .. 32 */
6481 OP_oIffffb, /* 0 .. 65535 */
6482 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
6484 OP_oRR, /* ARM register */
6485 OP_oRRnpc, /* ARM register, not the PC */
6486 OP_oRRnpcsp, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6487 OP_oRRw, /* ARM register, not r15, optional trailing ! */
6488 OP_oRND, /* Optional Neon double precision register */
6489 OP_oRNQ, /* Optional Neon quad precision register */
6490 OP_oRNDQ, /* Optional Neon double or quad precision register */
6491 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
6492 OP_oSHll, /* LSL immediate */
6493 OP_oSHar, /* ASR immediate */
6494 OP_oSHllar, /* LSL or ASR immediate */
6495 OP_oROR, /* ROR 0/8/16/24 */
6496 OP_oBARRIER_I15, /* Option argument for a barrier instruction. */
6498 /* Some pre-defined mixed (ARM/THUMB) operands. */
6499 OP_RR_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6500 OP_RRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6501 OP_oRRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6503 OP_FIRST_OPTIONAL = OP_oI7b
6506 /* Generic instruction operand parser. This does no encoding and no
6507 semantic validation; it merely squirrels values away in the inst
6508 structure. Returns SUCCESS or FAIL depending on whether the
6509 specified grammar matched. */
6511 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6513 unsigned const int *upat = pattern;
6514 char *backtrack_pos = 0;
6515 const char *backtrack_error = 0;
6516 int i, val = 0, backtrack_index = 0;
6517 enum arm_reg_type rtype;
6518 parse_operand_result result;
6519 unsigned int op_parse_code;
6521 #define po_char_or_fail(chr) \
6524 if (skip_past_char (&str, chr) == FAIL) \
6529 #define po_reg_or_fail(regtype) \
6532 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6533 & inst.operands[i].vectype); \
6536 first_error (_(reg_expected_msgs[regtype])); \
6539 inst.operands[i].reg = val; \
6540 inst.operands[i].isreg = 1; \
6541 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6542 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6543 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6544 || rtype == REG_TYPE_VFD \
6545 || rtype == REG_TYPE_NQ); \
6549 #define po_reg_or_goto(regtype, label) \
6552 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6553 & inst.operands[i].vectype); \
6557 inst.operands[i].reg = val; \
6558 inst.operands[i].isreg = 1; \
6559 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6560 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6561 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6562 || rtype == REG_TYPE_VFD \
6563 || rtype == REG_TYPE_NQ); \
6567 #define po_imm_or_fail(min, max, popt) \
6570 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6572 inst.operands[i].imm = val; \
6576 #define po_scalar_or_goto(elsz, label) \
6579 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6582 inst.operands[i].reg = val; \
6583 inst.operands[i].isscalar = 1; \
6587 #define po_misc_or_fail(expr) \
6595 #define po_misc_or_fail_no_backtrack(expr) \
6599 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6600 backtrack_pos = 0; \
6601 if (result != PARSE_OPERAND_SUCCESS) \
6606 #define po_barrier_or_imm(str) \
6609 val = parse_barrier (&str); \
6610 if (val == FAIL && ! ISALPHA (*str)) \
6613 /* ISB can only take SY as an option. */ \
6614 || ((inst.instruction & 0xf0) == 0x60 \
6617 inst.error = _("invalid barrier type"); \
6618 backtrack_pos = 0; \
6624 skip_whitespace (str);
6626 for (i = 0; upat[i] != OP_stop; i++)
6628 op_parse_code = upat[i];
6629 if (op_parse_code >= 1<<16)
6630 op_parse_code = thumb ? (op_parse_code >> 16)
6631 : (op_parse_code & ((1<<16)-1));
6633 if (op_parse_code >= OP_FIRST_OPTIONAL)
6635 /* Remember where we are in case we need to backtrack. */
6636 gas_assert (!backtrack_pos);
6637 backtrack_pos = str;
6638 backtrack_error = inst.error;
6639 backtrack_index = i;
6642 if (i > 0 && (i > 1 || inst.operands[0].present))
6643 po_char_or_fail (',');
6645 switch (op_parse_code)
6653 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6654 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6655 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6656 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6657 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6658 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6660 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6662 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6664 /* Also accept generic coprocessor regs for unknown registers. */
6666 po_reg_or_fail (REG_TYPE_CN);
6668 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6669 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6670 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6671 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6672 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6673 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6674 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6675 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6676 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6677 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6679 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6681 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6682 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6684 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6686 /* Neon scalar. Using an element size of 8 means that some invalid
6687 scalars are accepted here, so deal with those in later code. */
6688 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6692 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6695 po_imm_or_fail (0, 0, TRUE);
6700 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6705 po_scalar_or_goto (8, try_rr);
6708 po_reg_or_fail (REG_TYPE_RN);
6714 po_scalar_or_goto (8, try_nsdq);
6717 po_reg_or_fail (REG_TYPE_NSDQ);
6723 po_scalar_or_goto (8, try_ndq);
6726 po_reg_or_fail (REG_TYPE_NDQ);
6732 po_scalar_or_goto (8, try_vfd);
6735 po_reg_or_fail (REG_TYPE_VFD);
6740 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6741 not careful then bad things might happen. */
6742 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6747 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6750 /* There's a possibility of getting a 64-bit immediate here, so
6751 we need special handling. */
6752 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6755 inst.error = _("immediate value is out of range");
6763 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6766 po_imm_or_fail (0, 63, TRUE);
6771 po_char_or_fail ('[');
6772 po_reg_or_fail (REG_TYPE_RN);
6773 po_char_or_fail (']');
6779 po_reg_or_fail (REG_TYPE_RN);
6780 if (skip_past_char (&str, '!') == SUCCESS)
6781 inst.operands[i].writeback = 1;
6785 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6786 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6787 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6788 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6789 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6790 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6791 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6792 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6793 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6794 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6795 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6796 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6798 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6800 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6801 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6803 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6804 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6805 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6806 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6808 /* Immediate variants */
6810 po_char_or_fail ('{');
6811 po_imm_or_fail (0, 255, TRUE);
6812 po_char_or_fail ('}');
6816 /* The expression parser chokes on a trailing !, so we have
6817 to find it first and zap it. */
6820 while (*s && *s != ',')
6825 inst.operands[i].writeback = 1;
6827 po_imm_or_fail (0, 31, TRUE);
6835 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6840 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6845 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6847 if (inst.reloc.exp.X_op == O_symbol)
6849 val = parse_reloc (&str);
6852 inst.error = _("unrecognized relocation suffix");
6855 else if (val != BFD_RELOC_UNUSED)
6857 inst.operands[i].imm = val;
6858 inst.operands[i].hasreloc = 1;
6863 /* Operand for MOVW or MOVT. */
6865 po_misc_or_fail (parse_half (&str));
6868 /* Register or expression. */
6869 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6870 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6872 /* Register or immediate. */
6873 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6874 I0: po_imm_or_fail (0, 0, FALSE); break;
6876 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6878 if (!is_immediate_prefix (*str))
6881 val = parse_fpa_immediate (&str);
6884 /* FPA immediates are encoded as registers 8-15.
6885 parse_fpa_immediate has already applied the offset. */
6886 inst.operands[i].reg = val;
6887 inst.operands[i].isreg = 1;
6890 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6891 I32z: po_imm_or_fail (0, 32, FALSE); break;
6893 /* Two kinds of register. */
6896 struct reg_entry *rege = arm_reg_parse_multi (&str);
6898 || (rege->type != REG_TYPE_MMXWR
6899 && rege->type != REG_TYPE_MMXWC
6900 && rege->type != REG_TYPE_MMXWCG))
6902 inst.error = _("iWMMXt data or control register expected");
6905 inst.operands[i].reg = rege->number;
6906 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6912 struct reg_entry *rege = arm_reg_parse_multi (&str);
6914 || (rege->type != REG_TYPE_MMXWC
6915 && rege->type != REG_TYPE_MMXWCG))
6917 inst.error = _("iWMMXt control register expected");
6920 inst.operands[i].reg = rege->number;
6921 inst.operands[i].isreg = 1;
6926 case OP_CPSF: val = parse_cps_flags (&str); break;
6927 case OP_ENDI: val = parse_endian_specifier (&str); break;
6928 case OP_oROR: val = parse_ror (&str); break;
6929 case OP_COND: val = parse_cond (&str); break;
6930 case OP_oBARRIER_I15:
6931 po_barrier_or_imm (str); break;
6933 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6939 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6940 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6942 inst.error = _("Banked registers are not available with this "
6948 val = parse_psr (&str, op_parse_code == OP_wPSR);
6952 po_reg_or_goto (REG_TYPE_RN, try_apsr);
6955 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
6957 if (strncasecmp (str, "APSR_", 5) == 0)
6964 case 'c': found = (found & 1) ? 16 : found | 1; break;
6965 case 'n': found = (found & 2) ? 16 : found | 2; break;
6966 case 'z': found = (found & 4) ? 16 : found | 4; break;
6967 case 'v': found = (found & 8) ? 16 : found | 8; break;
6968 default: found = 16;
6972 inst.operands[i].isvec = 1;
6973 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
6974 inst.operands[i].reg = REG_PC;
6981 po_misc_or_fail (parse_tb (&str));
6984 /* Register lists. */
6986 val = parse_reg_list (&str);
6989 inst.operands[1].writeback = 1;
6995 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
6999 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7003 /* Allow Q registers too. */
7004 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7009 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7011 inst.operands[i].issingle = 1;
7016 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7021 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7022 &inst.operands[i].vectype);
7025 /* Addressing modes */
7027 po_misc_or_fail (parse_address (&str, i));
7031 po_misc_or_fail_no_backtrack (
7032 parse_address_group_reloc (&str, i, GROUP_LDR));
7036 po_misc_or_fail_no_backtrack (
7037 parse_address_group_reloc (&str, i, GROUP_LDRS));
7041 po_misc_or_fail_no_backtrack (
7042 parse_address_group_reloc (&str, i, GROUP_LDC));
7046 po_misc_or_fail (parse_shifter_operand (&str, i));
7050 po_misc_or_fail_no_backtrack (
7051 parse_shifter_operand_group_reloc (&str, i));
7055 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7059 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7063 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7067 as_fatal (_("unhandled operand code %d"), op_parse_code);
7070 /* Various value-based sanity checks and shared operations. We
7071 do not signal immediate failures for the register constraints;
7072 this allows a syntax error to take precedence. */
7073 switch (op_parse_code)
7081 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7082 inst.error = BAD_PC;
7087 if (inst.operands[i].isreg)
7089 if (inst.operands[i].reg == REG_PC)
7090 inst.error = BAD_PC;
7091 else if (inst.operands[i].reg == REG_SP)
7092 inst.error = BAD_SP;
7097 if (inst.operands[i].isreg
7098 && inst.operands[i].reg == REG_PC
7099 && (inst.operands[i].writeback || thumb))
7100 inst.error = BAD_PC;
7109 case OP_oBARRIER_I15:
7118 inst.operands[i].imm = val;
7125 /* If we get here, this operand was successfully parsed. */
7126 inst.operands[i].present = 1;
7130 inst.error = BAD_ARGS;
7135 /* The parse routine should already have set inst.error, but set a
7136 default here just in case. */
7138 inst.error = _("syntax error");
7142 /* Do not backtrack over a trailing optional argument that
7143 absorbed some text. We will only fail again, with the
7144 'garbage following instruction' error message, which is
7145 probably less helpful than the current one. */
7146 if (backtrack_index == i && backtrack_pos != str
7147 && upat[i+1] == OP_stop)
7150 inst.error = _("syntax error");
7154 /* Try again, skipping the optional argument at backtrack_pos. */
7155 str = backtrack_pos;
7156 inst.error = backtrack_error;
7157 inst.operands[backtrack_index].present = 0;
7158 i = backtrack_index;
7162 /* Check that we have parsed all the arguments. */
7163 if (*str != '\0' && !inst.error)
7164 inst.error = _("garbage following instruction");
7166 return inst.error ? FAIL : SUCCESS;
7169 #undef po_char_or_fail
7170 #undef po_reg_or_fail
7171 #undef po_reg_or_goto
7172 #undef po_imm_or_fail
7173 #undef po_scalar_or_fail
7174 #undef po_barrier_or_imm
7176 /* Shorthand macro for instruction encoding functions issuing errors. */
7177 #define constraint(expr, err) \
7188 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7189 instructions are unpredictable if these registers are used. This
7190 is the BadReg predicate in ARM's Thumb-2 documentation. */
7191 #define reject_bad_reg(reg) \
7193 if (reg == REG_SP || reg == REG_PC) \
7195 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7200 /* If REG is R13 (the stack pointer), warn that its use is
7202 #define warn_deprecated_sp(reg) \
7204 if (warn_on_deprecated && reg == REG_SP) \
7205 as_warn (_("use of r13 is deprecated")); \
7208 /* Functions for operand encoding. ARM, then Thumb. */
7210 #define rotate_left(v, n) (v << n | v >> (32 - n))
7212 /* If VAL can be encoded in the immediate field of an ARM instruction,
7213 return the encoded form. Otherwise, return FAIL. */
7216 encode_arm_immediate (unsigned int val)
7220 for (i = 0; i < 32; i += 2)
7221 if ((a = rotate_left (val, i)) <= 0xff)
7222 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7227 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7228 return the encoded form. Otherwise, return FAIL. */
7230 encode_thumb32_immediate (unsigned int val)
7237 for (i = 1; i <= 24; i++)
7240 if ((val & ~(0xff << i)) == 0)
7241 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7245 if (val == ((a << 16) | a))
7247 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7251 if (val == ((a << 16) | a))
7252 return 0x200 | (a >> 8);
7256 /* Encode a VFP SP or DP register number into inst.instruction. */
7259 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7261 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7264 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7267 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7270 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7275 first_error (_("D register out of range for selected VFP version"));
7283 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7287 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7291 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7295 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7299 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7303 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7311 /* Encode a <shift> in an ARM-format instruction. The immediate,
7312 if any, is handled by md_apply_fix. */
7314 encode_arm_shift (int i)
7316 if (inst.operands[i].shift_kind == SHIFT_RRX)
7317 inst.instruction |= SHIFT_ROR << 5;
7320 inst.instruction |= inst.operands[i].shift_kind << 5;
7321 if (inst.operands[i].immisreg)
7323 inst.instruction |= SHIFT_BY_REG;
7324 inst.instruction |= inst.operands[i].imm << 8;
7327 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7332 encode_arm_shifter_operand (int i)
7334 if (inst.operands[i].isreg)
7336 inst.instruction |= inst.operands[i].reg;
7337 encode_arm_shift (i);
7341 inst.instruction |= INST_IMMEDIATE;
7342 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7343 inst.instruction |= inst.operands[i].imm;
/* encode_arm_addr_mode_common: shared encoding for mode-2/mode-3
   addresses — base register into bits 19:16, PRE_INDEX/WRITE_BACK flags,
   with diagnostics for unsupported pre-indexed/unindexed forms and a
   warning when write-back aliases the transfer register.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
7347 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7349 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7352 Generate an error if the operand is not a register. */
7353 constraint (!inst.operands[i].isreg,
7354 _("Instruction does not support =N addresses"))
7356 inst.instruction |= inst.operands[i].reg << 16;
7358 if (inst.operands[i].preind)
/* is_t (ldrt/strt family) forbids pre-indexing.  */
7362 inst.error = _("instruction does not accept preindexed addressing");
7365 inst.instruction |= PRE_INDEX;
7366 if (inst.operands[i].writeback)
7367 inst.instruction |= WRITE_BACK;
7370 else if (inst.operands[i].postind)
7372 gas_assert (inst.operands[i].writeback);
7374 inst.instruction |= WRITE_BACK;
7376 else /* unindexed - only for coprocessor */
7378 inst.error = _("instruction does not accept unindexed addressing");
/* Warn if Rn (bits 19:16) equals Rd (bits 15:12) with write-back or
   post-indexing — UNPREDICTABLE per the architecture.  */
7382 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7383 && (((inst.instruction & 0x000f0000) >> 16)
7384 == ((inst.instruction & 0x0000f000) >> 12)))
7385 as_warn ((inst.instruction & LOAD_BIT)
7386 ? _("destination register same as write-back base")
7387 : _("source register same as write-back base"));
/* encode_arm_addr_mode_2: encode operand I (set up by parse_address) as an
   ARM mode-2 (word/unsigned-byte load/store) address: register offset with
   optional shift, or immediate offset via reloc.  is_t rejects forms
   invalid for ldrt/strt.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
7390 /* inst.operands[i] was set up by parse_address. Encode it into an
7391 ARM-format mode 2 load or store instruction. If is_t is true,
7392 reject forms that cannot be used with a T instruction (i.e. not
7395 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7397 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7399 encode_arm_addr_mode_common (i, is_t);
7401 if (inst.operands[i].immisreg)
7403 constraint ((inst.operands[i].imm == REG_PC
7404 || (is_pc && inst.operands[i].writeback)),
/* In mode 2 the I bit set means *register* offset.  */
7406 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
7407 inst.instruction |= inst.operands[i].imm;
7408 if (!inst.operands[i].negative)
7409 inst.instruction |= INDEX_UP;
7410 if (inst.operands[i].shifted)
7412 if (inst.operands[i].shift_kind == SHIFT_RRX)
7413 inst.instruction |= SHIFT_ROR << 5;
7416 inst.instruction |= inst.operands[i].shift_kind << 5;
7417 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7421 else /* immediate offset in inst.reloc */
7423 if (is_pc && !inst.reloc.pc_rel)
7425 const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7427 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7428 cannot use PC in addressing.
7429 PC cannot be used in writeback addressing, either. */
7430 constraint ((is_t || inst.operands[i].writeback),
7433 /* Use of PC in str is deprecated for ARMv7. */
7434 if (warn_on_deprecated
7436 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7437 as_warn (_("use of PC in this instruction is deprecated"));
7440 if (inst.reloc.type == BFD_RELOC_UNUSED)
7442 /* Prefer + for zero encoded value. */
7443 if (!inst.operands[i].negative)
7444 inst.instruction |= INDEX_UP;
7445 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
/* encode_arm_addr_mode_3: encode operand I as an ARM mode-3
   (halfword / signed-byte / doubleword) address.  Scaled register index
   is rejected; immediate offsets use BFD_RELOC_ARM_OFFSET_IMM8.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
7450 /* inst.operands[i] was set up by parse_address. Encode it into an
7451 ARM-format mode 3 load or store instruction. Reject forms that
7452 cannot be used with such instructions. If is_t is true, reject
7453 forms that cannot be used with a T instruction (i.e. not
7456 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7458 if (inst.operands[i].immisreg && inst.operands[i].shifted)
7460 inst.error = _("instruction does not accept scaled register index");
7464 encode_arm_addr_mode_common (i, is_t);
7466 if (inst.operands[i].immisreg)
7468 constraint ((inst.operands[i].imm == REG_PC
7469 || (is_t && inst.operands[i].reg == REG_PC)),
7471 constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7473 inst.instruction |= inst.operands[i].imm;
7474 if (!inst.operands[i].negative)
7475 inst.instruction |= INDEX_UP;
7477 else /* immediate offset in inst.reloc */
7479 constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7480 && inst.operands[i].writeback),
/* Mode 3 immediate addressing is flagged by HWOFFSET_IMM.  */
7482 inst.instruction |= HWOFFSET_IMM;
7483 if (inst.reloc.type == BFD_RELOC_UNUSED)
7485 /* Prefer + for zero encoded value. */
7486 if (!inst.operands[i].negative)
7487 inst.instruction |= INDEX_UP;
7489 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
/* neon_write_immbits: scatter an 8-bit Neon modified-immediate into the
   split fields of the instruction word (bits 3:0, 18:16, and the 'a' bit,
   whose position differs between Thumb and ARM encodings).
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
7494 /* Write immediate bits [7:0] to the following locations:
7496 |28/24|23 19|18 16|15 4|3 0|
7497 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7499 This function is used by VMOV/VMVN/VORR/VBIC. */
7502 neon_write_immbits (unsigned immbits)
7504 inst.instruction |= immbits & 0xf;
7505 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7506 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
/* neon_invert_size: bitwise-invert the low SIZE bits of the 64-bit value
   held as XHI:XLO (either pointer may be NULL).  Used to try the MVN form
   of a Neon immediate.
   NOTE(review): extract is missing interior lines (the switch on SIZE and
   write-back of results); code kept verbatim.  */
7509 /* Invert low-order SIZE bits of XHI:XLO. */
7512 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
7514 unsigned immlo = xlo ? *xlo : 0;
7515 unsigned immhi = xhi ? *xhi : 0;
7520 immlo = (~immlo) & 0xff;
7524 immlo = (~immlo) & 0xffff;
7528 immhi = (~immhi) & 0xffffffff;
7532 immlo = (~immlo) & 0xffffffff;
/* neon_bits_same_in_bytes: true when each byte of IMM is all-zeros or
   all-ones (form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD).
   neon_squash_bits: for such a value, collapse each byte to one bit,
   giving 0bABCD.  Both support the I64 modified-immediate encoding.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
7546 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7550 neon_bits_same_in_bytes (unsigned imm)
7552 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
7553 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
7554 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
7555 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
7558 /* For immediate of above form, return 0bABCD. */
7561 neon_squash_bits (unsigned imm)
7563 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
7564 | ((imm & 0x01000000) >> 21);
/* neon_qfloat_bits: compress an IEEE single-precision bit pattern that is
   known to be representable as a Neon "quarter-float" into the 8-bit
   abcdefgh immediate (sign bit + low exponent bit + top 6 mantissa bits).
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
7567 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7570 neon_qfloat_bits (unsigned imm)
7572 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
/* neon_cmode_for_move_imm: choose a CMODE value (and fill *IMMBITS, maybe
   flip *OP) for a Neon VMOV/VMVN-style immediate IMMHI:IMMLO at element
   SIZE, trying 64/32/16/8-bit patterns in turn.
   NOTE(review): extract is missing interior lines (FAIL returns, the
   per-size case labels and the CMODE return values); code kept verbatim —
   the returned constants are not visible here.  */
7575 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7576 the instruction. *OP is passed as the initial value of the op field, and
7577 may be set to a different value depending on the constant (i.e.
7578 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7579 MVN). If the immediate looks like a repeated pattern then also
7580 try smaller element sizes. */
7583 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
7584 unsigned *immbits, int *op, int size,
7585 enum neon_el_type type)
7587 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7589 if (type == NT_float && !float_p)
7592 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
7594 if (size != 32 || *op == 1)
7596 *immbits = neon_qfloat_bits (immlo);
/* 64-bit: every byte all-zeros or all-ones.  */
7602 if (neon_bits_same_in_bytes (immhi)
7603 && neon_bits_same_in_bytes (immlo))
7607 *immbits = (neon_squash_bits (immhi) << 4)
7608 | neon_squash_bits (immlo);
/* 32-bit patterns: single byte in any of four positions, or
   byte + trailing ones ("...ff" / "...ffff" shifted forms).  */
7619 if (immlo == (immlo & 0x000000ff))
7624 else if (immlo == (immlo & 0x0000ff00))
7626 *immbits = immlo >> 8;
7629 else if (immlo == (immlo & 0x00ff0000))
7631 *immbits = immlo >> 16;
7634 else if (immlo == (immlo & 0xff000000))
7636 *immbits = immlo >> 24;
7639 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
7641 *immbits = (immlo >> 8) & 0xff;
7644 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
7646 *immbits = (immlo >> 16) & 0xff;
/* 16-bit: halves must match before trying 16-bit cmodes.  */
7650 if ((immlo & 0xffff) != (immlo >> 16))
7657 if (immlo == (immlo & 0x000000ff))
7662 else if (immlo == (immlo & 0x0000ff00))
7664 *immbits = immlo >> 8;
/* 8-bit: bytes must match.  */
7668 if ((immlo & 0xff) != (immlo >> 8))
7673 if (immlo == (immlo & 0x000000ff))
7675 /* Don't allow MVN with 8-bit immediate. */
/* move_or_literal_pool: implement the "=expr" load pseudo-op.  Try to turn
   the load into a MOV/MVN (Thumb or ARM) or a Neon VMOV immediate; on
   failure fall back to a literal-pool entry addressed via PC.  Returns
   TRUE when the instruction was rewritten (or on error), FALSE when a
   literal-pool load was set up.
   NOTE(review): extract is missing interior lines (returns, braces, the
   vec64_p path's guard); code kept verbatim.  */
7692 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7693 Determine whether it can be performed with a move instruction; if
7694 it can, convert inst.instruction to that move instruction and
7695 return TRUE; if it can't, convert inst.instruction to a literal-pool
7696 load and return FALSE. If this is not a valid thing to do in the
7697 current context, set inst.error and return TRUE.
7699 inst.operands[i] describes the destination register. */
7702 move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
7705 bfd_boolean thumb_p = (t == CONST_THUMB);
7706 bfd_boolean arm_p = (t == CONST_ARM);
7707 bfd_boolean vec64_p = (t == CONST_VEC) && !inst.operands[i].issingle;
7710 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7714 if ((inst.instruction & tbit) == 0)
7716 inst.error = _("invalid pseudo operation");
7719 if (inst.reloc.exp.X_op != O_constant
7720 && inst.reloc.exp.X_op != O_symbol
7721 && inst.reloc.exp.X_op != O_big)
7723 inst.error = _("constant expression expected");
7726 if ((inst.reloc.exp.X_op == O_constant
7727 || inst.reloc.exp.X_op == O_big)
7728 && !inst.operands[i].issingle)
7730 if (thumb_p && inst.reloc.exp.X_op == O_constant)
7732 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
7734 /* This can be done with a mov(1) instruction. */
7735 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7736 inst.instruction |= inst.reloc.exp.X_add_number;
7740 else if (arm_p && inst.reloc.exp.X_op == O_constant)
7742 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
7745 /* This can be done with a mov instruction. */
7746 inst.instruction &= LITERAL_MASK;
7747 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7748 inst.instruction |= value & 0xfff;
/* Try the logically-inverted constant for MVN.  */
7752 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
7755 /* This can be done with a mvn instruction. */
7756 inst.instruction &= LITERAL_MASK;
7757 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7758 inst.instruction |= value & 0xfff;
/* Neon 64-bit vector path: try VMOV then VMVN modified-immediates.  */
7765 unsigned immbits = 0;
7766 unsigned immlo = inst.operands[1].imm;
7767 unsigned immhi = inst.operands[1].regisimm
7768 ? inst.operands[1].reg
7769 : inst.reloc.exp.X_unsigned
7771 : ((int64_t)((int) immlo)) >> 32;
7772 int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
7773 &op, 64, NT_invtype);
7777 neon_invert_size (&immlo, &immhi, 64);
7779 cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
7780 &op, 64, NT_invtype);
7784 inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
7789 /* Fill other bits in vmov encoding for both thumb and arm. */
7791 inst.instruction |= (0x7 << 29) | (0xF << 24);
7793 inst.instruction |= (0xF << 28) | (0x1 << 25);
7794 neon_write_immbits (immbits);
/* Fallback: emit a PC-relative literal-pool load.  */
7800 if (add_to_lit_pool ((!inst.operands[i].isvec
7801 || inst.operands[i].issingle) ? 4 : 8) == FAIL)
7804 inst.operands[1].reg = REG_PC;
7805 inst.operands[1].isreg = 1;
7806 inst.operands[1].preind = 1;
7807 inst.reloc.pc_rel = 1;
7808 inst.reloc.type = (thumb_p
7809 ? BFD_RELOC_ARM_THUMB_OFFSET
7811 ? BFD_RELOC_ARM_HWLITERAL
7812 : BFD_RELOC_ARM_LITERAL));
/* encode_arm_cp_address: encode operand I as a coprocessor load/store
   address (LDC/STC style).  wb_ok / unind_ok gate writeback and unindexed
   forms; reloc_override substitutes for BFD_ARM_CP_OFF_IMM unless a group
   relocation is already pending.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
7816 /* inst.operands[i] was set up by parse_address. Encode it into an
7817 ARM-format instruction. Reject all forms which cannot be encoded
7818 into a coprocessor load/store instruction. If wb_ok is false,
7819 reject use of writeback; if unind_ok is false, reject use of
7820 unindexed addressing. If reloc_override is not 0, use it instead
7821 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7822 (in which case it is preserved). */
7825 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
7827 if (!inst.operands[i].isreg)
/* "=constant" form: route through the literal pool / VMOV machinery.  */
7829 gas_assert (inst.operands[0].isvec);
7830 if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
7834 inst.instruction |= inst.operands[i].reg << 16;
7836 gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
7838 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
7840 gas_assert (!inst.operands[i].writeback);
7843 inst.error = _("instruction does not support unindexed addressing");
7846 inst.instruction |= inst.operands[i].imm;
7847 inst.instruction |= INDEX_UP;
7851 if (inst.operands[i].preind)
7852 inst.instruction |= PRE_INDEX;
7854 if (inst.operands[i].writeback)
7856 if (inst.operands[i].reg == REG_PC)
7858 inst.error = _("pc may not be used with write-back");
7863 inst.error = _("instruction does not support writeback");
7866 inst.instruction |= WRITE_BACK;
7870 inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
/* Preserve group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0.  */
7871 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
7872 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
7873 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
7876 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
7878 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
7881 /* Prefer + for zero encoded value. */
7882 if (!inst.operands[i].negative)
7883 inst.instruction |= INDEX_UP;
/* Generic operand encoders (do_rd, do_rd_rm, do_rm_rn, do_rd_rn, do_rn_rd
   presumably — the function-name lines are missing from this extract).
   Each ORs the parsed register operands into the conventional ARM bit
   positions (Rd at 12, Rn at 16, Rm at 0).  Code kept verbatim.  */
7888 /* Functions for instruction encoding, sorted by sub-architecture.
7889 First some generics; their names are taken from the conventional
7890 bit positions for register arguments in ARM format instructions. */
7900 inst.instruction |= inst.operands[0].reg << 12;
7906 inst.instruction |= inst.operands[0].reg << 12;
7907 inst.instruction |= inst.operands[1].reg;
7913 inst.instruction |= inst.operands[0].reg;
7914 inst.instruction |= inst.operands[1].reg << 16;
7920 inst.instruction |= inst.operands[0].reg << 12;
7921 inst.instruction |= inst.operands[1].reg << 16;
7927 inst.instruction |= inst.operands[0].reg << 16;
7928 inst.instruction |= inst.operands[1].reg << 12;
/* check_obsolete: if assembling for "any" CPU, warn with MSG; if the
   current CPU has FEATURE, the (missing) branch presumably errors out.
   NOTE(review): extract is missing the return statements; kept verbatim.  */
7932 check_obsolete (const arm_feature_set *feature, const char *msg)
7934 if (ARM_CPU_IS_ANY (cpu_variant))
7936 as_warn ("%s", msg);
7939 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
/* Three-register encoders.  First body: presumably do_rd_rm_rn — includes
   SWP{B} operand-overlap checks and obsolescence/deprecation warnings.
   Then: Rd/Rn/Rm at bits 12/16/0; an Rm/Rd/Rn variant with PC and
   zero-offset constraints; a single-immediate encoder; and an
   Rd + coprocessor-address encoder.  Function-name lines are missing;
   code kept verbatim.  */
7951 unsigned Rn = inst.operands[2].reg;
7952 /* Enforce restrictions on SWP instruction. */
7953 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
7955 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
7956 _("Rn must not overlap other operands"))
7958 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
7960 if (!check_obsolete (&arm_ext_v8,
7961 _("swp{b} use is obsoleted for ARMv8 and later"))
7962 && warn_on_deprecated
7963 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6)
7964 as_warn (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
7967 inst.instruction |= inst.operands[0].reg << 12;
7968 inst.instruction |= inst.operands[1].reg;
7969 inst.instruction |= Rn << 16;
/* Rd at 12, Rn at 16, Rm at 0 (presumably do_rd_rn_rm).  */
7975 inst.instruction |= inst.operands[0].reg << 12;
7976 inst.instruction |= inst.operands[1].reg << 16;
7977 inst.instruction |= inst.operands[2].reg;
/* Variant with PC check and required zero offset (presumably do_rm_rd_rn).  */
7983 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
7984 constraint (((inst.reloc.exp.X_op != O_constant
7985 && inst.reloc.exp.X_op != O_illegal)
7986 || inst.reloc.exp.X_add_number != 0),
7988 inst.instruction |= inst.operands[0].reg;
7989 inst.instruction |= inst.operands[1].reg << 12;
7990 inst.instruction |= inst.operands[2].reg << 16;
/* Single immediate operand (presumably do_imm0).  */
7996 inst.instruction |= inst.operands[0].imm;
/* Rd + coprocessor address (presumably do_rd_cpaddr).  */
8002 inst.instruction |= inst.operands[0].reg << 12;
8003 encode_arm_cp_address (1, TRUE, TRUE, 0);
/* do_adr / do_adrl: ADR(L) pseudo-ops become PC-relative ADD/SUB with
   BFD_RELOC_ARM_IMMEDIATE (one insn) or BFD_RELOC_ARM_ADRL_IMMEDIATE
   (two insns); the -8 accounts for the ARM-state PC read offset.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8006 /* ARM instructions, in alphabetical order by function name (except
8007 that wrapper functions appear immediately after the function they
8010 /* This is a pseudo-op of the form "adr rd, label" to be converted
8011 into a relative address of the form "add rd, pc, #label-.-8". */
8016 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8018 /* Frag hacking will turn this into a sub instruction if the offset turns
8019 out to be negative. */
8020 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8021 inst.reloc.pc_rel = 1;
8022 inst.reloc.exp.X_add_number -= 8;
8025 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8026 into a relative address of the form:
8027 add rd, pc, #low(label-.-8)"
8028 add rd, rd, #high(label-.-8)" */
8033 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8035 /* Frag hacking will turn this into a sub instruction if the offset turns
8036 out to be negative. */
8037 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8038 inst.reloc.pc_rel = 1;
/* ADRL always occupies two ARM instructions.  */
8039 inst.size = INSN_SIZE * 2;
8040 inst.reloc.exp.X_add_number -= 8;
/* Arithmetic encoder (presumably do_arit): default the omitted Rn to Rd,
   then encode Rd/Rn and the shifter operand.  Below it, a barrier encoder
   (presumably do_barrier): use the given option or default to SY (0xf).
   Function-name lines are missing; code kept verbatim.  */
8046 if (!inst.operands[1].present)
8047 inst.operands[1].reg = inst.operands[0].reg;
8048 inst.instruction |= inst.operands[0].reg << 12;
8049 inst.instruction |= inst.operands[1].reg << 16;
8050 encode_arm_shifter_operand (2);
8056 if (inst.operands[0].present)
8057 inst.instruction |= inst.operands[0].imm;
8059 inst.instruction |= 0xf;
/* Bit-field encoders (presumably do_bfc, do_bfi, do_bfx).  The hardware
   stores LSB and MSB rather than LSB and width, hence msb = lsb + width
   and the (msb - 1) insertion; "#0 in second position" of BFI selects the
   BFC form via Rm = PC.  Function-name lines are missing; code verbatim.  */
8065 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8066 constraint (msb > 32, _("bit-field extends past end of register"));
8067 /* The instruction encoding stores the LSB and MSB,
8068 not the LSB and width. */
8069 inst.instruction |= inst.operands[0].reg << 12;
8070 inst.instruction |= inst.operands[1].imm << 7;
8071 inst.instruction |= (msb - 1) << 16;
8079 /* #0 in second position is alternative syntax for bfc, which is
8080 the same instruction but with REG_PC in the Rm field. */
8081 if (!inst.operands[1].isreg)
8082 inst.operands[1].reg = REG_PC;
8084 msb = inst.operands[2].imm + inst.operands[3].imm;
8085 constraint (msb > 32, _("bit-field extends past end of register"));
8086 /* The instruction encoding stores the LSB and MSB,
8087 not the LSB and width. */
8088 inst.instruction |= inst.operands[0].reg << 12;
8089 inst.instruction |= inst.operands[1].reg;
8090 inst.instruction |= inst.operands[2].imm << 7;
8091 inst.instruction |= (msb - 1) << 16;
/* SBFX/UBFX (presumably do_bfx): stores LSB and (width - 1).  */
8097 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8098 _("bit-field extends past end of register"))
8099 inst.instruction |= inst.operands[0].reg << 12;
8100 inst.instruction |= inst.operands[1].reg;
8101 inst.instruction |= inst.operands[2].imm << 7;
8102 inst.instruction |= (inst.operands[3].imm - 1) << 16;
/* do_bkpt: encode the 16-bit BKPT immediate into its split fields
   (top 12 bits → 19:8, bottom 4 bits → 3:0).
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8105 /* ARM V5 breakpoint instruction (argument parse)
8106 BKPT <16 bit unsigned immediate>
8107 Instruction is not conditional.
8108 The bit pattern given in insns[] has the COND_ALWAYS condition,
8109 and it is an error if the caller tried to override that. */
8114 /* Top 12 of 16 bits to bits 19:8. */
8115 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8117 /* Bottom 4 of 16 bits to bits 3:0. */
8118 inst.instruction |= inst.operands[0].imm & 0xf;
/* encode_branch: set the branch relocation — honour (plt)/(tlscall)
   suffixes, otherwise use DEFAULT_RELOC; always PC-relative.
   do_branch / do_bl follow: EABI v4+ objects prefer PCREL_JUMP /
   PCREL_CALL over plain PCREL_BRANCH.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8122 encode_branch (int default_reloc)
8124 if (inst.operands[0].hasreloc)
8126 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
8127 && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
8128 _("the only valid suffixes here are '(plt)' and '(tlscall)'"))
8129 inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
8130 ? BFD_RELOC_ARM_PLT32
8131 : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
8134 inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
8135 inst.reloc.pc_rel = 1;
/* do_branch (presumably).  */
8142 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8143 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8146 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
/* do_bl (presumably): conditional BL cannot interwork, so it keeps the
   JUMP reloc under EABI v4+.  */
8153 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8155 if (inst.cond == COND_ALWAYS)
8156 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
8158 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8162 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
/* do_blx / do_bx / do_bxj: branch-exchange family.  BLX has two distinct
   opcodes (register vs. 25-bit address form, the latter unconditional);
   BX emits an R_ARM_V4BX reloc for pre-v5 EABI objects; BXJ is the
   Jazelle jump.  NOTE(review): extract is missing interior lines; code
   kept verbatim.  */
8165 /* ARM V5 branch-link-exchange instruction (argument parse)
8166 BLX <target_addr> ie BLX(1)
8167 BLX{<condition>} <Rm> ie BLX(2)
8168 Unfortunately, there are two different opcodes for this mnemonic.
8169 So, the insns[].value is not used, and the code here zaps values
8170 into inst.instruction.
8171 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8176 if (inst.operands[0].isreg)
8178 /* Arg is a register; the opcode provided by insns[] is correct.
8179 It is not illegal to do "blx pc", just useless. */
8180 if (inst.operands[0].reg == REG_PC)
8181 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8183 inst.instruction |= inst.operands[0].reg;
8187 /* Arg is an address; this instruction cannot be executed
8188 conditionally, and the opcode must be adjusted.
8189 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8190 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8191 constraint (inst.cond != COND_ALWAYS, BAD_COND);
8192 inst.instruction = 0xfa000000;
8193 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
/* do_bx (presumably).  */
8200 bfd_boolean want_reloc;
8202 if (inst.operands[0].reg == REG_PC)
8203 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8205 inst.instruction |= inst.operands[0].reg;
8206 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8207 it is for ARMv4t or earlier. */
8208 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
8209 if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
8213 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
8218 inst.reloc.type = BFD_RELOC_ARM_V4BX;
8222 /* ARM v5TEJ. Jump to Jazelle code. */
8227 if (inst.operands[0].reg == REG_PC)
8228 as_tsktsk (_("use of r15 in bxj is not really useful"));
8230 inst.instruction |= inst.operands[0].reg;
/* do_cdp: coprocessor data operation — coproc# at 8, opcode1 at 20,
   CRd at 12, CRn at 16, CRm at 0, opcode2 at 5.  Then a comparison
   encoder (presumably do_cmp): Rn at 16 plus shifter operand.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8233 /* Co-processor data operation:
8234 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8235 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8239 inst.instruction |= inst.operands[0].reg << 8;
8240 inst.instruction |= inst.operands[1].imm << 20;
8241 inst.instruction |= inst.operands[2].reg << 12;
8242 inst.instruction |= inst.operands[3].reg << 16;
8243 inst.instruction |= inst.operands[4].reg;
8244 inst.instruction |= inst.operands[5].imm << 5;
8250 inst.instruction |= inst.operands[0].reg << 16;
8251 encode_arm_shifter_operand (1);
/* deprecated_coproc_regs: table of coprocessor registers whose MRC/MCR
   access is deprecated (ARMv8: CP15 barriers and ThumbEE registers),
   with per-entry feature sets and diagnostic messages; consulted by the
   coprocessor-transfer encoder below.
   NOTE(review): extract is missing interior lines (field declarations of
   the struct); code kept verbatim.  */
8254 /* Transfer between coprocessor and ARM registers.
8255 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8260 No special properties. */
8262 struct deprecated_coproc_regs_s
8269 arm_feature_set deprecated;
8270 arm_feature_set obsoleted;
8271 const char *dep_msg;
8272 const char *obs_msg;
8275 #define DEPR_ACCESS_V8 \
8276 N_("This coprocessor register access is deprecated in ARMv8")
8278 /* Table of all deprecated coprocessor registers. */
8279 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
8281 {15, 0, 7, 10, 5, /* CP15DMB. */
8282 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8283 DEPR_ACCESS_V8, NULL},
8284 {15, 0, 7, 10, 4, /* CP15DSB. */
8285 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8286 DEPR_ACCESS_V8, NULL},
8287 {15, 0, 7, 5, 4, /* CP15ISB. */
8288 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8289 DEPR_ACCESS_V8, NULL},
8290 {14, 6, 1, 0, 0, /* TEEHBR. */
8291 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8292 DEPR_ACCESS_V8, NULL},
8293 {14, 6, 0, 0, 0, /* TEECR. */
8294 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8295 DEPR_ACCESS_V8, NULL},
8298 #undef DEPR_ACCESS_V8
8300 static const size_t deprecated_coproc_reg_count =
8301 sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
/* do_co_reg (presumably): MRC/MCR — validate Rd (Thumb vs. ARM rules),
   warn for deprecated coprocessor registers, then pack coproc#/opc1/Rd/
   CRn/CRm/opc2.  Followed by do_co_reg2c (presumably): MCRR/MRRC with a
   register pair.  NOTE(review): extract is missing interior lines; code
   kept verbatim.  */
8309 Rd = inst.operands[2].reg;
8312 if (inst.instruction == 0xee000010
8313 || inst.instruction == 0xfe000010)
8315 reject_bad_reg (Rd);
8318 constraint (Rd == REG_SP, BAD_SP);
8323 if (inst.instruction == 0xe000010)
8324 constraint (Rd == REG_PC, BAD_PC);
8327 for (i = 0; i < deprecated_coproc_reg_count; ++i)
8329 const struct deprecated_coproc_regs_s *r =
8330 deprecated_coproc_regs + i;
8332 if (inst.operands[0].reg == r->cp
8333 && inst.operands[1].imm == r->opc1
8334 && inst.operands[3].reg == r->crn
8335 && inst.operands[4].reg == r->crm
8336 && inst.operands[5].imm == r->opc2)
8338 if (! ARM_CPU_IS_ANY (cpu_variant)
8339 && warn_on_deprecated
8340 && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
8341 as_warn ("%s", r->dep_msg);
8345 inst.instruction |= inst.operands[0].reg << 8;
8346 inst.instruction |= inst.operands[1].imm << 21;
8347 inst.instruction |= Rd << 12;
8348 inst.instruction |= inst.operands[3].reg << 16;
8349 inst.instruction |= inst.operands[4].reg;
8350 inst.instruction |= inst.operands[5].imm << 5;
8353 /* Transfer between coprocessor register and pair of ARM registers.
8354 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8359 Two XScale instructions are special cases of these:
8361 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8362 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8364 Result unpredictable if Rd or Rn is R15. */
8371 Rd = inst.operands[2].reg;
8372 Rn = inst.operands[3].reg;
8376 reject_bad_reg (Rd);
8377 reject_bad_reg (Rn);
8381 constraint (Rd == REG_PC, BAD_PC);
8382 constraint (Rn == REG_PC, BAD_PC);
8385 inst.instruction |= inst.operands[0].reg << 8;
8386 inst.instruction |= inst.operands[1].imm << 4;
8387 inst.instruction |= Rd << 12;
8388 inst.instruction |= Rn << 16;
8389 inst.instruction |= inst.operands[4].reg;
/* CPS encoder (presumably do_cps): iflags at bit 6, with optional mode
   operand setting CPSI_MMOD.  The final line is a separate single-
   immediate encoder whose name line is missing.  Code kept verbatim.  */
8395 inst.instruction |= inst.operands[0].imm << 6;
8396 if (inst.operands[1].present)
8398 inst.instruction |= CPSI_MMOD;
8399 inst.instruction |= inst.operands[1].imm;
8406 inst.instruction |= inst.operands[0].imm;
/* do_div (presumably): SDIV/UDIV — Rn defaults to Rd when omitted; none
   of Rd/Rn/Rm may be PC; fields at bits 16/0/8.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8412 unsigned Rd, Rn, Rm;
8414 Rd = inst.operands[0].reg;
8415 Rn = (inst.operands[1].present
8416 ? inst.operands[1].reg : Rd);
8417 Rm = inst.operands[2].reg;
8419 constraint ((Rd == REG_PC), BAD_PC);
8420 constraint ((Rn == REG_PC), BAD_PC);
8421 constraint ((Rm == REG_PC), BAD_PC);
8423 inst.instruction |= Rd << 16;
8424 inst.instruction |= Rn << 0;
8425 inst.instruction |= Rm << 8;
/* do_it: IT has no ARM-mode encoding; validate it as if in Thumb mode so
   unified-syntax source destined for Thumb still gets checked.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8431 /* There is no IT instruction in ARM mode. We
8432 process it to do the validation as if in
8433 thumb mode, just in case the code gets
8434 assembled for thumb using the unified syntax. */
8439 set_it_insn_type (IT_INSN);
8440 now_it.mask = (inst.instruction & 0xf) | 0x10;
8441 now_it.cc = inst.operands[0].imm;
/* only_one_reg_in_list: if RANGE (a register bitmask) has exactly one bit
   set in positions 0..15, return that register number; otherwise -1.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8445 /* If there is only one register in the register list,
8446 then return its register number. Otherwise return -1. */
8448 only_one_reg_in_list (int range)
8450 int i = ffs (range) - 1;
8451 return (i > 15 || range != (1 << i)) ? -1 : i;
/* encode_ldmstm: LDM/STM (and PUSH/POP) — base + register list, with
   UNPREDICTABLE-writeback warnings; single-register PUSH/POP is rewritten
   to the A2 (single-transfer) encoding.  do_ldmstm is the thin wrapper.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8455 encode_ldmstm(int from_push_pop_mnem)
8457 int base_reg = inst.operands[0].reg;
8458 int range = inst.operands[1].imm;
8461 inst.instruction |= base_reg << 16;
8462 inst.instruction |= range;
/* '^' suffix (user-bank / exception-return) selects LDM type 2/3.  */
8464 if (inst.operands[1].writeback)
8465 inst.instruction |= LDM_TYPE_2_OR_3;
8467 if (inst.operands[0].writeback)
8469 inst.instruction |= WRITE_BACK;
8470 /* Check for unpredictable uses of writeback. */
8471 if (inst.instruction & LOAD_BIT)
8473 /* Not allowed in LDM type 2. */
8474 if ((inst.instruction & LDM_TYPE_2_OR_3)
8475 && ((range & (1 << REG_PC)) == 0))
8476 as_warn (_("writeback of base register is UNPREDICTABLE"));
8477 /* Only allowed if base reg not in list for other types. */
8478 else if (range & (1 << base_reg))
8479 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8483 /* Not allowed for type 2. */
8484 if (inst.instruction & LDM_TYPE_2_OR_3)
8485 as_warn (_("writeback of base register is UNPREDICTABLE"));
8486 /* Only allowed if base reg not in list, or first in list. */
8487 else if ((range & (1 << base_reg))
8488 && (range & ((1 << base_reg) - 1)))
8489 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8493 /* If PUSH/POP has only one register, then use the A2 encoding. */
8494 one_reg = only_one_reg_in_list (range);
8495 if (from_push_pop_mnem && one_reg >= 0)
8497 int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
8499 inst.instruction &= A_COND_MASK;
8500 inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
8501 inst.instruction |= one_reg << 12;
/* do_ldmstm (presumably) simply delegates.  */
8508 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
/* do_ldrd: LDRD/STRD — first register must be even and not r14; second
   register (optional) must be first+1; warns about base/index overlap
   hazards, then encodes Rt and a mode-3 address.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8511 /* ARMv5TE load-consecutive (argument parse)
8520 constraint (inst.operands[0].reg % 2 != 0,
8521 _("first transfer register must be even"))
8522 constraint (inst.operands[1].present
8523 && inst.operands[1].reg != inst.operands[0].reg + 1,
8524 _("can only transfer two consecutive registers"))
8525 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8526 constraint (!inst.operands[2].isreg, _("'[' expected"));
8528 if (!inst.operands[1].present)
8529 inst.operands[1].reg = inst.operands[0].reg + 1;
8531 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8532 register and the first register written; we have to diagnose
8533 overlap between the base and the second register written here. */
8535 if (inst.operands[2].reg == inst.operands[1].reg
8536 && (inst.operands[2].writeback || inst.operands[2].postind))
8537 as_warn (_("base register written back, and overlaps "
8538 "second transfer register"))
8540 if (!(inst.instruction & V4_STR_BIT))
8542 /* For an index-register load, the index register must not overlap the
8543 destination (even if not write-back). */
8544 if (inst.operands[2].immisreg
8545 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
8546 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
8547 as_warn (_("index register overlaps transfer register"));
8549 inst.instruction |= inst.operands[0].reg << 12;
8550 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
/* do_ldrex (presumably): LDREX accepts only a plain [Rn] address with
   zero offset; any other addressing form (or a mistyped trailing register
   name occluded by a label) is rejected with a generic message.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8556 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8557 || inst.operands[1].postind || inst.operands[1].writeback
8558 || inst.operands[1].immisreg || inst.operands[1].shifted
8559 || inst.operands[1].negative
8560 /* This can arise if the programmer has written
8562 or if they have mistakenly used a register name as the last
8565 It is very difficult to distinguish between these two cases
8566 because "rX" might actually be a label. ie the register
8567 name has been occluded by a symbol of the same name. So we
8568 just generate a general 'bad addressing mode' type error
8569 message and leave it up to the programmer to discover the
8570 true cause and fix their mistake. */
8571 || (inst.operands[1].reg == REG_PC),
8574 constraint (inst.reloc.exp.X_op != O_constant
8575 || inst.reloc.exp.X_add_number != 0,
8576 _("offset must be zero in ARM encoding"))
8578 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
8580 inst.instruction |= inst.operands[0].reg << 12;
8581 inst.instruction |= inst.operands[1].reg << 16;
8582 inst.reloc.type = BFD_RELOC_UNUSED;
/* do_ldrexd (presumably): LDREXD — even first register, not r14, second
   register (if given) must be first+1; Rt at 12, Rn at 16.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8588 constraint (inst.operands[0].reg % 2 != 0,
8589 _("even register required"))
8590 constraint (inst.operands[1].present
8591 && inst.operands[1].reg != inst.operands[0].reg + 1,
8592 _("can only load two consecutive registers"))
8593 /* If op 1 were present and equal to PC, this function wouldn't
8594 have been called in the first place. */
8595 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8597 inst.instruction |= inst.operands[0].reg << 12;
8598 inst.instruction |= inst.operands[2].reg << 16;
/* check_ldr_r15_aligned: diagnose 'ldr pc, [pc, #imm]' when the literal
   offset is not a multiple of four — UNPREDICTABLE in both ARM and Thumb
   state.  Fixed diagnostic typo: "alligned" -> "aligned".
   NOTE(review): extract is missing interior lines; code otherwise
   verbatim.  */
8601 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8602 which is not a multiple of four is UNPREDICTABLE. */
8604 check_ldr_r15_aligned (void)
8606 constraint (!(inst.operands[1].immisreg)
8607 && (inst.operands[0].reg == REG_PC
8608 && inst.operands[1].reg == REG_PC
8609 && (inst.reloc.exp.X_add_number & 0x3)),
8610 _("ldr to register 15 must be 4-byte aligned"));
/* do_ldst (presumably): LDR/STR — handle "=expr" via the literal pool,
   encode a mode-2 address, then check 'ldr pc' alignment.  do_ldstt
   follows: LDRT/STRT force post-indexed writeback form.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8616 inst.instruction |= inst.operands[0].reg << 12;
8617 if (!inst.operands[1].isreg)
8618 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
8620 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8621 check_ldr_r15_aligned ();
8627 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8629 if (inst.operands[1].preind)
8631 constraint (inst.reloc.exp.X_op != O_constant
8632 || inst.reloc.exp.X_add_number != 0,
8633 _("this instruction requires a post-indexed address"))
8635 inst.operands[1].preind = 0;
8636 inst.operands[1].postind = 1;
8637 inst.operands[1].writeback = 1;
8639 inst.instruction |= inst.operands[0].reg << 12;
8640 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
/* Halfword/signed-byte loads and stores (presumably do_ldstv4 and
   do_ldsttv4): mode-3 addressing; the T variant forces post-indexed
   writeback, mirroring do_ldstt above.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8643 /* Halfword and signed-byte load/store operations. */
8648 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8649 inst.instruction |= inst.operands[0].reg << 12;
8650 if (!inst.operands[1].isreg)
8651 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
8653 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8659 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8661 if (inst.operands[1].preind)
8663 constraint (inst.reloc.exp.X_op != O_constant
8664 || inst.reloc.exp.X_add_number != 0,
8665 _("this instruction requires a post-indexed address"))
8667 inst.operands[1].preind = 0;
8668 inst.operands[1].postind = 1;
8669 inst.operands[1].writeback = 1;
8671 inst.instruction |= inst.operands[0].reg << 12;
8672 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
/* do_lstc (presumably): LDC/STC — coproc# at 8, CRd at 12, then a
   coprocessor address for operand 2.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8675 /* Co-processor register load/store.
8676 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8680 inst.instruction |= inst.operands[0].reg << 8;
8681 inst.instruction |= inst.operands[1].reg << 12;
8682 encode_arm_cp_address (2, TRUE, TRUE, 0);
/* do_mlas (presumably): MLA — Rd==Rm is only tsk-tsk'd pre-v6 (and not
   for MLS); fields at 16/0/8/12.  Then do_mov (presumably): Rd plus a
   shifter operand.  NOTE(review): extract is missing interior lines;
   code kept verbatim.  */
8688 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8689 if (inst.operands[0].reg == inst.operands[1].reg
8690 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8691 && !(inst.instruction & 0x00400000))
8692 as_tsktsk (_("Rd and Rm should be different in mla"));
8694 inst.instruction |= inst.operands[0].reg << 16;
8695 inst.instruction |= inst.operands[1].reg;
8696 inst.instruction |= inst.operands[2].reg << 8;
8697 inst.instruction |= inst.operands[3].reg << 12;
8703 inst.instruction |= inst.operands[0].reg << 12;
8704 encode_arm_shifter_operand (1);
/* do_mov16: MOVW/MOVT — reject mismatched :lower16:/:upper16: reloc
   specifiers; an absolute value is split into the 0:11 and 16:19 fields.
   NOTE(review): extract is missing interior lines; code kept verbatim.  */
8707 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8714 top = (inst.instruction & 0x00400000) != 0;
8715 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8716 _(":lower16: not allowed this instruction"))
8717 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8718 _(":upper16: not allowed instruction"))
8719 inst.instruction |= inst.operands[0].reg << 12;
8720 if (inst.reloc.type == BFD_RELOC_UNUSED)
8722 imm = inst.reloc.exp.X_add_number;
8723 /* The value is in two pieces: 0:11, 16:19. */
8724 inst.instruction |= (imm & 0x00000fff);
8725 inst.instruction |= (imm & 0x0000f000) << 4;
/* NOTE(review): incomplete extract.  Neon-syntax MRS/MSR handling for
   the FPSCR: an APSR-style destination ("isvec") maps VMRS to the
   legacy "fmstat" opcode after validating that operand 1 is FPSCR
   (register number 1); a vector operand otherwise maps to
   "fmrx"/"fmxr".  */
8729 static void do_vfp_nsyn_opcode (const char *);
8732 do_vfp_nsyn_mrs (void)
8734 if (inst.operands[0].isvec)
8736 if (inst.operands[1].reg != 1)
8737 first_error (_("operand 1 must be FPSCR"));
/* Wipe both operands before re-dispatching as the operand-less
   fmstat alias.  */
8738 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8739 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8740 do_vfp_nsyn_opcode ("fmstat");
8742 else if (inst.operands[1].isvec)
8743 do_vfp_nsyn_opcode ("fmrx");
8751 do_vfp_nsyn_msr (void)
8753 if (inst.operands[0].isvec)
8754 do_vfp_nsyn_opcode ("fmxr");
/* NOTE(review): incomplete extract.  VMRS/VMSR encodings: SP is
   rejected as Rt in Thumb mode; PC is rejected unless the APSR_nzcv
   form set .isvec.  The system-register number goes in bits 16-19 and
   Rt in bits 12-15, with no further validation of the parsed register
   number.  */
8764 unsigned Rt = inst.operands[0].reg;
8766 if (thumb_mode && Rt == REG_SP)
8768 inst.error = BAD_SP;
8772 /* APSR_ sets isvec. All other refs to PC are illegal. */
8773 if (!inst.operands[0].isvec && Rt == REG_PC)
8775 inst.error = BAD_PC;
8779 /* If we get through parsing the register name, we just insert the number
8780 generated into the instruction without further validation. */
8781 inst.instruction |= (inst.operands[1].reg << 16);
8782 inst.instruction |= (Rt << 12);
/* VMSR direction: source register is operand 1 here.  */
8788 unsigned Rt = inst.operands[1].reg;
8791 reject_bad_reg (Rt);
8792 else if (Rt == REG_PC)
8794 inst.error = BAD_PC;
8798 /* If we get through parsing the register name, we just insert the number
8799 generated into the instruction without further validation. */
8800 inst.instruction |= (inst.operands[0].reg << 16);
8801 inst.instruction |= (Rt << 12);
/* NOTE(review): incomplete extract.  ARM MRS/MSR: VFP nsyn forms are
   tried first; plain MRS rejects PC as Rd and accepts either a banked
   register operand (validated against bit patterns) or a PSR spec
   restricted to APSR/CPSR/SPSR.  MSR ORs in the field mask and either
   a source register or an immediate with a pending
   BFD_RELOC_ARM_IMMEDIATE fixup.  */
8809 if (do_vfp_nsyn_mrs () == SUCCESS)
8812 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8813 inst.instruction |= inst.operands[0].reg << 12;
8815 if (inst.operands[1].isreg)
8817 br = inst.operands[1].reg;
8818 if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
8819 as_bad (_("bad register for mrs"));
8823 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8824 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
8826 _("'APSR', 'CPSR' or 'SPSR' expected"));
8827 br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
8830 inst.instruction |= br;
8833 /* Two possible forms:
8834 "{C|S}PSR_<field>, Rm",
8835 "{C|S}PSR_f, #expression". */
8840 if (do_vfp_nsyn_msr () == SUCCESS)
8843 inst.instruction |= inst.operands[0].imm;
8844 if (inst.operands[1].isreg)
8845 inst.instruction |= inst.operands[1].reg;
/* Immediate form: set the I bit and leave the value to a fixup.  */
8848 inst.instruction |= INST_IMMEDIATE;
8849 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8850 inst.reloc.pc_rel = 0;
/* NOTE(review): incomplete extract.  MUL: a missing Rs defaults to Rd;
   Rd==Rm draws a pre-v6 warning.  Long multiplies (UMULL/SMULL/
   UMLAL/SMLAL) place RdLo, RdHi, Rm, Rs in bits 12, 16, 0 and 8 and
   warn on the RdHi==RdLo and pre-v6 Rm-overlap restrictions.  */
8857 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8859 if (!inst.operands[2].present)
8860 inst.operands[2].reg = inst.operands[0].reg;
8861 inst.instruction |= inst.operands[0].reg << 16;
8862 inst.instruction |= inst.operands[1].reg;
8863 inst.instruction |= inst.operands[2].reg << 8;
8865 if (inst.operands[0].reg == inst.operands[1].reg
8866 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8867 as_tsktsk (_("Rd and Rm should be different in mul"));
8870 /* Long Multiply Parser
8871 UMULL RdLo, RdHi, Rm, Rs
8872 SMULL RdLo, RdHi, Rm, Rs
8873 UMLAL RdLo, RdHi, Rm, Rs
8874 SMLAL RdLo, RdHi, Rm, Rs. */
8879 inst.instruction |= inst.operands[0].reg << 12;
8880 inst.instruction |= inst.operands[1].reg << 16;
8881 inst.instruction |= inst.operands[2].reg;
8882 inst.instruction |= inst.operands[3].reg << 8;
8884 /* rdhi and rdlo must be different. */
8885 if (inst.operands[0].reg == inst.operands[1].reg)
8886 as_tsktsk (_("rdhi and rdlo must be different"));
8888 /* rdhi, rdlo and rm must all be different before armv6. */
8889 if ((inst.operands[0].reg == inst.operands[2].reg
8890 || inst.operands[1].reg == inst.operands[2].reg)
8891 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8892 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
/* NOTE(review): incomplete extract.  NOP: on v6K (or with an explicit
   hint operand) the insn is rebuilt as a CPSR-set with no fields
   selected (0x0320f000) plus the hint number.  PKHBT ORs Rd/Rn/Rm into
   bits 12/16/0 with an optional shift; PKHTB with no shift is rewritten
   as PKHBT with Rn and Rm swapped.  */
8898 if (inst.operands[0].present
8899 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8901 /* Architectural NOP hints are CPSR sets with no bits selected. */
8902 inst.instruction &= 0xf0000000;
8903 inst.instruction |= 0x0320f000;
8904 if (inst.operands[0].present)
8905 inst.instruction |= inst.operands[0].imm;
8909 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8910 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8911 Condition defaults to COND_ALWAYS.
8912 Error if Rd, Rn or Rm are R15. */
8917 inst.instruction |= inst.operands[0].reg << 12;
8918 inst.instruction |= inst.operands[1].reg << 16;
8919 inst.instruction |= inst.operands[2].reg;
8920 if (inst.operands[3].present)
8921 encode_arm_shift (3);
8924 /* ARM V6 PKHTB (Argument Parse). */
8929 if (!inst.operands[3].present)
8931 /* If the shift specifier is omitted, turn the instruction
8932 into pkhbt rd, rm, rn. */
8933 inst.instruction &= 0xfff00010;
8934 inst.instruction |= inst.operands[0].reg << 12;
8935 inst.instruction |= inst.operands[1].reg;
8936 inst.instruction |= inst.operands[2].reg << 16;
/* Shifted form keeps the operand order and encodes the shift.  */
8940 inst.instruction |= inst.operands[0].reg << 12;
8941 inst.instruction |= inst.operands[1].reg << 16;
8942 inst.instruction |= inst.operands[2].reg;
8943 encode_arm_shift (3);
/* NOTE(review): incomplete extract.  PLD/PLI share the same address
   constraints (must be a pre-indexed register address, no writeback,
   no post-indexing); PLI additionally clears PRE_INDEX after encoding.
   PUSH/POP synthesize an SP-writeback LDM/STM: operand 0 is shifted to
   operand 1 and a fabricated SP! base operand is installed.  */
8947 /* ARMv5TE: Preload-Cache
8948 MP Extensions: Preload for write
8952 Syntactically, like LDR with B=1, W=0, L=1. */
8957 constraint (!inst.operands[0].isreg,
8958 _("'[' expected after PLD mnemonic"));
8959 constraint (inst.operands[0].postind,
8960 _("post-indexed expression used in preload instruction"));
8961 constraint (inst.operands[0].writeback,
8962 _("writeback used in preload instruction"));
8963 constraint (!inst.operands[0].preind,
8964 _("unindexed addressing used in preload instruction"));
8965 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8968 /* ARMv7: PLI <addr_mode> */
8972 constraint (!inst.operands[0].isreg,
8973 _("'[' expected after PLI mnemonic"));
8974 constraint (inst.operands[0].postind,
8975 _("post-indexed expression used in preload instruction"));
8976 constraint (inst.operands[0].writeback,
8977 _("writeback used in preload instruction"));
8978 constraint (!inst.operands[0].preind,
8979 _("unindexed addressing used in preload instruction"));
8980 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8981 inst.instruction &= ~PRE_INDEX;
/* push/pop: rebuild as LDM/STM with an implicit "sp!" base.  */
8987 inst.operands[1] = inst.operands[0];
8988 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8989 inst.operands[0].isreg = 1;
8990 inst.operands[0].writeback = 1;
8991 inst.operands[0].reg = REG_SP;
8992 encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
/* NOTE(review): incomplete extract.  RFE encodes Rn in bits 16-19
   with optional WRITE_BACK.  SSAT/SSAT16 store the saturate position
   minus one in bits 16-19 (signed immediates are 1-based); USAT/USAT16
   store it unmodified (0-based).  The word-sized variants take an
   optional shift on operand 3.  */
8995 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8996 word at the specified address and the following word
8998 Unconditionally executed.
8999 Error if Rn is R15. */
9004 inst.instruction |= inst.operands[0].reg << 16;
9005 if (inst.operands[0].writeback)
9006 inst.instruction |= WRITE_BACK;
9009 /* ARM V6 ssat (argument parse). */
9014 inst.instruction |= inst.operands[0].reg << 12;
9015 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9016 inst.instruction |= inst.operands[2].reg;
9018 if (inst.operands[3].present)
9019 encode_arm_shift (3);
9022 /* ARM V6 usat (argument parse). */
9027 inst.instruction |= inst.operands[0].reg << 12;
9028 inst.instruction |= inst.operands[1].imm << 16;
9029 inst.instruction |= inst.operands[2].reg;
9031 if (inst.operands[3].present)
9032 encode_arm_shift (3);
9035 /* ARM V6 ssat16 (argument parse). */
9040 inst.instruction |= inst.operands[0].reg << 12;
9041 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9042 inst.instruction |= inst.operands[2].reg;
9048 inst.instruction |= inst.operands[0].reg << 12;
9049 inst.instruction |= inst.operands[1].imm << 16;
9050 inst.instruction |= inst.operands[2].reg;
/* NOTE(review): incomplete extract.  SETEND sets bit 9 (0x200) for BE
   and warns when targeting ARMv8.  The shift handler defaults a
   missing Rm to Rd, encodes a register shift amount (Rs in bits 8-11
   plus SHIFT_BY_REG) or leaves an immediate shift to the
   BFD_RELOC_ARM_SHIFT_IMM fixup.  SMC/HVC/SWI only install their
   respective relocation types.  */
9053 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9054 preserving the other bits.
9056 setend <endian_specifier>, where <endian_specifier> is either
9062 if (warn_on_deprecated
9063 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9064 as_warn (_("setend use is deprecated for ARMv8"));
9066 if (inst.operands[0].imm)
9067 inst.instruction |= 0x200;
9073 unsigned int Rm = (inst.operands[1].present
9074 ? inst.operands[1].reg
9075 : inst.operands[0].reg);
9077 inst.instruction |= inst.operands[0].reg << 12;
9078 inst.instruction |= Rm;
9079 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9081 inst.instruction |= inst.operands[2].reg << 8;
9082 inst.instruction |= SHIFT_BY_REG;
9083 /* PR 12854: Error on extraneous shifts. */
9084 constraint (inst.operands[2].shifted,
9085 _("extraneous shift as part of operand to shift insn"));
9088 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9094 inst.reloc.type = BFD_RELOC_ARM_SMC;
9095 inst.reloc.pc_rel = 0;
9101 inst.reloc.type = BFD_RELOC_ARM_HVC;
9102 inst.reloc.pc_rel = 0;
9108 inst.reloc.type = BFD_RELOC_ARM_SWI;
9109 inst.reloc.pc_rel = 0;
/* NOTE(review): incomplete extract.  El Segundo signed multiply
   variants pack Rd/Rm/Rs/Rn into bits 16/0/8/12 (long form uses
   12/16/0/8 and warns on RdLo==RdHi).  SRS forces the base register to
   be r13 when given explicitly, ORs in the mode immediate, and sets
   WRITE_BACK if either operand requested it.  */
9112 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9113 SMLAxy{cond} Rd,Rm,Rs,Rn
9114 SMLAWy{cond} Rd,Rm,Rs,Rn
9115 Error if any register is R15. */
9120 inst.instruction |= inst.operands[0].reg << 16;
9121 inst.instruction |= inst.operands[1].reg;
9122 inst.instruction |= inst.operands[2].reg << 8;
9123 inst.instruction |= inst.operands[3].reg << 12;
9126 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9127 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9128 Error if any register is R15.
9129 Warning if Rdlo == Rdhi. */
9134 inst.instruction |= inst.operands[0].reg << 12;
9135 inst.instruction |= inst.operands[1].reg << 16;
9136 inst.instruction |= inst.operands[2].reg;
9137 inst.instruction |= inst.operands[3].reg << 8;
9139 if (inst.operands[0].reg == inst.operands[1].reg)
9140 as_tsktsk (_("rdhi and rdlo must be different"));
9143 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9144 SMULxy{cond} Rd,Rm,Rs
9145 Error if any register is R15. */
9150 inst.instruction |= inst.operands[0].reg << 16;
9151 inst.instruction |= inst.operands[1].reg;
9152 inst.instruction |= inst.operands[2].reg << 8;
9155 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9156 the same for both ARM and Thumb-2. */
9163 if (inst.operands[0].present)
9165 reg = inst.operands[0].reg;
9166 constraint (reg != REG_SP, _("SRS base register must be r13"));
9171 inst.instruction |= reg << 16;
9172 inst.instruction |= inst.operands[1].imm;
9173 if (inst.operands[0].writeback || inst.operands[1].writeback)
9174 inst.instruction |= WRITE_BACK;
/* NOTE(review): incomplete extract.  STREX family: the address operand
   must be a plain pre-indexed register with zero offset (no writeback,
   index register, shift or negative offset); Rd must not overlap Rt or
   Rn.  The doubleword variant additionally requires an even, non-LR
   first source register and a consecutive pair.  The later standalone
   overlap constraints appear to belong to sibling strexb/strexh-style
   handlers whose other lines were dropped -- TODO confirm against the
   full file.  */
9177 /* ARM V6 strex (argument parse). */
9182 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9183 || inst.operands[2].postind || inst.operands[2].writeback
9184 || inst.operands[2].immisreg || inst.operands[2].shifted
9185 || inst.operands[2].negative
9186 /* See comment in do_ldrex(). */
9187 || (inst.operands[2].reg == REG_PC),
9190 constraint (inst.operands[0].reg == inst.operands[1].reg
9191 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9193 constraint (inst.reloc.exp.X_op != O_constant
9194 || inst.reloc.exp.X_add_number != 0,
9195 _("offset must be zero in ARM encoding"));
9197 inst.instruction |= inst.operands[0].reg << 12;
9198 inst.instruction |= inst.operands[1].reg;
9199 inst.instruction |= inst.operands[2].reg << 16;
9200 inst.reloc.type = BFD_RELOC_UNUSED;
9206 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9207 || inst.operands[2].postind || inst.operands[2].writeback
9208 || inst.operands[2].immisreg || inst.operands[2].shifted
9209 || inst.operands[2].negative,
9212 constraint (inst.operands[0].reg == inst.operands[1].reg
9213 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
/* Doubleword exclusive store: register pair must be even/consecutive.  */
9221 constraint (inst.operands[1].reg % 2 != 0,
9222 _("even register required"));
9223 constraint (inst.operands[2].present
9224 && inst.operands[2].reg != inst.operands[1].reg + 1,
9225 _("can only store two consecutive registers"));
9226 /* If op 2 were present and equal to PC, this function wouldn't
9227 have been called in the first place. */
9228 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
9230 constraint (inst.operands[0].reg == inst.operands[1].reg
9231 || inst.operands[0].reg == inst.operands[1].reg + 1
9232 || inst.operands[0].reg == inst.operands[3].reg,
9235 inst.instruction |= inst.operands[0].reg << 12;
9236 inst.instruction |= inst.operands[1].reg;
9237 inst.instruction |= inst.operands[3].reg << 16;
9244 constraint (inst.operands[0].reg == inst.operands[1].reg
9245 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9253 constraint (inst.operands[0].reg == inst.operands[1].reg
9254 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
/* NOTE(review): incomplete extract.  SXTAH packs Rd/Rn/Rm into bits
   12/16/0 and the rotation selector into bits 10-11; the two-register
   SXTH form omits Rn.  */
9259 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9260 extends it to 32-bits, and adds the result to a value in another
9261 register. You can specify a rotation by 0, 8, 16, or 24 bits
9262 before extracting the 16-bit value.
9263 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9264 Condition defaults to COND_ALWAYS.
9265 Error if any register uses R15. */
9270 inst.instruction |= inst.operands[0].reg << 12;
9271 inst.instruction |= inst.operands[1].reg << 16;
9272 inst.instruction |= inst.operands[2].reg;
9273 inst.instruction |= inst.operands[3].imm << 10;
9278 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9279 Condition defaults to COND_ALWAYS.
9280 Error if any register uses R15. */
9285 inst.instruction |= inst.operands[0].reg << 12;
9286 inst.instruction |= inst.operands[1].reg;
9287 inst.instruction |= inst.operands[2].imm << 10;
/* NOTE(review): incomplete extract (return types, braces and some
   lines dropped).  These are thin VFP encoders that delegate register
   placement to encode_arm_vfp_reg with the appropriate field selector
   (Sd/Sn/Sm, Dd/Dn/Dm); the *_reg_from_* and *_from_reg* variants also
   place a core register in bits 12-15 or 16-19; the ldst pair uses the
   coprocessor address encoder on operand 1.  */
9290 /* VFP instructions. In a logical order: SP variant first, monad
9291 before dyad, arithmetic then move then load/store. */
9294 do_vfp_sp_monadic (void)
9296 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9297 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9301 do_vfp_sp_dyadic (void)
9303 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9304 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9305 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9309 do_vfp_sp_compare_z (void)
9311 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9315 do_vfp_dp_sp_cvt (void)
9317 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9318 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9322 do_vfp_sp_dp_cvt (void)
9324 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9325 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9329 do_vfp_reg_from_sp (void)
9331 inst.instruction |= inst.operands[0].reg << 12;
9332 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9336 do_vfp_reg2_from_sp2 (void)
9338 constraint (inst.operands[2].imm != 2,
9339 _("only two consecutive VFP SP registers allowed here"));
9340 inst.instruction |= inst.operands[0].reg << 12;
9341 inst.instruction |= inst.operands[1].reg << 16;
9342 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9346 do_vfp_sp_from_reg (void)
9348 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9349 inst.instruction |= inst.operands[1].reg << 12;
9353 do_vfp_sp2_from_reg2 (void)
9355 constraint (inst.operands[0].imm != 2,
9356 _("only two consecutive VFP SP registers allowed here"));
9357 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
9358 inst.instruction |= inst.operands[1].reg << 12;
9359 inst.instruction |= inst.operands[2].reg << 16;
9363 do_vfp_sp_ldst (void)
9365 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9366 encode_arm_cp_address (1, FALSE, TRUE, 0);
9370 do_vfp_dp_ldst (void)
9372 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9373 encode_arm_cp_address (1, FALSE, TRUE, 0);
/* NOTE(review): incomplete extract.  Core encoders for VFP multiple
   load/store: base register in bits 16-19 with optional WRITE_BACK
   (non-IA addressing modes require writeback); the SP form ORs in the
   register count directly, the DP form doubles it and adds one for the
   FLDMX/FSTMX variants.  The six do_vfp_*_ldstm* wrappers select the
   addressing-mode enumerator.  */
9378 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9380 if (inst.operands[0].writeback)
9381 inst.instruction |= WRITE_BACK;
9383 constraint (ldstm_type != VFP_LDSTMIA,
9384 _("this addressing mode requires base-register writeback"));
9385 inst.instruction |= inst.operands[0].reg << 16;
9386 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9387 inst.instruction |= inst.operands[1].imm;
9391 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9395 if (inst.operands[0].writeback)
9396 inst.instruction |= WRITE_BACK;
9398 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9399 _("this addressing mode requires base-register writeback"));
9401 inst.instruction |= inst.operands[0].reg << 16;
9402 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
/* Each D register transfers two words; the X forms add a trailer word.  */
9404 count = inst.operands[1].imm << 1;
9405 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9408 inst.instruction |= count;
9412 do_vfp_sp_ldstmia (void)
9414 vfp_sp_ldstm (VFP_LDSTMIA);
9418 do_vfp_sp_ldstmdb (void)
9420 vfp_sp_ldstm (VFP_LDSTMDB);
9424 do_vfp_dp_ldstmia (void)
9426 vfp_dp_ldstm (VFP_LDSTMIA);
9430 do_vfp_dp_ldstmdb (void)
9432 vfp_dp_ldstm (VFP_LDSTMDB);
9436 do_vfp_xp_ldstmia (void)
9438 vfp_dp_ldstm (VFP_LDSTMIAX);
9442 do_vfp_xp_ldstmdb (void)
9444 vfp_dp_ldstm (VFP_LDSTMDBX);
/* NOTE(review): incomplete extract.  Double-precision register-field
   encoders named after their operand order, plus the VFPv3 immediate
   forms: the 8-bit modified-immediate splits into a high nibble at
   bits 16-19 and a low nibble at bits 0-3.  */
9448 do_vfp_dp_rd_rm (void)
9450 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9451 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9455 do_vfp_dp_rn_rd (void)
9457 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9458 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9462 do_vfp_dp_rd_rn (void)
9464 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9465 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9469 do_vfp_dp_rd_rn_rm (void)
9471 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9472 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9473 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9479 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9483 do_vfp_dp_rm_rd_rn (void)
9485 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9486 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9487 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9490 /* VFPv3 instructions. */
9492 do_vfp_sp_const (void)
9494 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9495 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9496 inst.instruction |= (inst.operands[1].imm & 0x0f);
9500 do_vfp_dp_const (void)
9502 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9503 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9504 inst.instruction |= (inst.operands[1].imm & 0x0f);
/* NOTE(review): incomplete extract.  vfp_conv validates the
   fixed-point fraction size (imm in [0,16] for 16-bit sources,
   [1,32] for 32-bit) and encodes srcsize-imm split across bit 5
   (low bit) and bits 0-3 (high bits).  The four wrappers set the
   destination register and delegate with the source size.  */
9508 vfp_conv (int srcsize)
9510 int immbits = srcsize - inst.operands[1].imm;
9512 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9514 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9515 i.e. immbits must be in range 0 - 16. */
9516 inst.error = _("immediate value out of range, expected range [0, 16]");
9519 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9521 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9522 i.e. immbits must be in range 0 - 31. */
9523 inst.error = _("immediate value out of range, expected range [1, 32]");
9527 inst.instruction |= (immbits & 1) << 5;
9528 inst.instruction |= (immbits >> 1);
9532 do_vfp_sp_conv_16 (void)
9534 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9539 do_vfp_dp_conv_16 (void)
9541 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9546 do_vfp_sp_conv_32 (void)
9548 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9553 do_vfp_dp_conv_32 (void)
9555 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
/* NOTE(review): incomplete extract.  FPA: a two-register encoder (Rn
   in bits 16-19, Rm in bits 0-3) followed by do_fpa_ldmstm, which maps
   the register count 1-3 onto the CP_T_X/CP_T_Y bits and emulates
   "ea"/"fd" stacking by synthesizing a 12-bytes-per-register offset,
   negating it for descending forms, and converting to post-indexing
   when writeback without pre-index was requested.  */
9559 /* FPA instructions. Also in a logical order. */
9564 inst.instruction |= inst.operands[0].reg << 16;
9565 inst.instruction |= inst.operands[1].reg;
9569 do_fpa_ldmstm (void)
9571 inst.instruction |= inst.operands[0].reg << 12;
9572 switch (inst.operands[1].imm)
9574 case 1: inst.instruction |= CP_T_X; break;
9575 case 2: inst.instruction |= CP_T_Y; break;
9576 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
9581 if (inst.instruction & (PRE_INDEX | INDEX_UP))
9583 /* The instruction specified "ea" or "fd", so we can only accept
9584 [Rn]{!}. The instruction does not really support stacking or
9585 unstacking, so we have to emulate these by setting appropriate
9586 bits and offsets. */
9587 constraint (inst.reloc.exp.X_op != O_constant
9588 || inst.reloc.exp.X_add_number != 0,
9589 _("this instruction does not support indexing"));
9591 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
9592 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
9594 if (!(inst.instruction & INDEX_UP))
9595 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
9597 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
9599 inst.operands[2].preind = 0;
9600 inst.operands[2].postind = 1;
9604 encode_arm_cp_address (2, TRUE, TRUE, 0);
/* NOTE(review): incomplete extract.  iWMMXt encoders in alphabetical
   order: simple field packers (textrc/textrm/tinsr/tmia/waligni/
   wmerge), wmov as a WOR rD,rN,rN alias, the wldst* family which
   routes through the coprocessor address encoder (wldstw forces the
   unconditional 0xf condition for control registers; wldstd rewrites
   to the iWMMXt2 register-offset encoding when available), and wshufh/
   wzero immediate packers.  */
9607 /* iWMMXt instructions: strictly in alphabetical order. */
9610 do_iwmmxt_tandorc (void)
9612 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
9616 do_iwmmxt_textrc (void)
9618 inst.instruction |= inst.operands[0].reg << 12;
9619 inst.instruction |= inst.operands[1].imm;
9623 do_iwmmxt_textrm (void)
9625 inst.instruction |= inst.operands[0].reg << 12;
9626 inst.instruction |= inst.operands[1].reg << 16;
9627 inst.instruction |= inst.operands[2].imm;
9631 do_iwmmxt_tinsr (void)
9633 inst.instruction |= inst.operands[0].reg << 16;
9634 inst.instruction |= inst.operands[1].reg << 12;
9635 inst.instruction |= inst.operands[2].imm;
9639 do_iwmmxt_tmia (void)
9641 inst.instruction |= inst.operands[0].reg << 5;
9642 inst.instruction |= inst.operands[1].reg;
9643 inst.instruction |= inst.operands[2].reg << 12;
9647 do_iwmmxt_waligni (void)
9649 inst.instruction |= inst.operands[0].reg << 12;
9650 inst.instruction |= inst.operands[1].reg << 16;
9651 inst.instruction |= inst.operands[2].reg;
9652 inst.instruction |= inst.operands[3].imm << 20;
9656 do_iwmmxt_wmerge (void)
9658 inst.instruction |= inst.operands[0].reg << 12;
9659 inst.instruction |= inst.operands[1].reg << 16;
9660 inst.instruction |= inst.operands[2].reg;
9661 inst.instruction |= inst.operands[3].imm << 21;
9665 do_iwmmxt_wmov (void)
9667 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9668 inst.instruction |= inst.operands[0].reg << 12;
9669 inst.instruction |= inst.operands[1].reg << 16;
9670 inst.instruction |= inst.operands[1].reg;
9674 do_iwmmxt_wldstbh (void)
9677 inst.instruction |= inst.operands[0].reg << 12;
9679 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9681 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9682 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9686 do_iwmmxt_wldstw (void)
9688 /* RIWR_RIWC clears .isreg for a control register. */
9689 if (!inst.operands[0].isreg)
9691 constraint (inst.cond != COND_ALWAYS, BAD_COND);
9692 inst.instruction |= 0xf0000000;
9695 inst.instruction |= inst.operands[0].reg << 12;
9696 encode_arm_cp_address (1, TRUE, TRUE, 0);
9700 do_iwmmxt_wldstd (void)
9702 inst.instruction |= inst.operands[0].reg << 12;
9703 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
9704 && inst.operands[1].immisreg)
/* iWMMXt2 register-offset form: rebuild the encoding by hand.  */
9706 inst.instruction &= ~0x1a000ff;
9707 inst.instruction |= (0xf << 28);
9708 if (inst.operands[1].preind)
9709 inst.instruction |= PRE_INDEX;
9710 if (!inst.operands[1].negative)
9711 inst.instruction |= INDEX_UP;
9712 if (inst.operands[1].writeback)
9713 inst.instruction |= WRITE_BACK;
9714 inst.instruction |= inst.operands[1].reg << 16;
9715 inst.instruction |= inst.reloc.exp.X_add_number << 4;
9716 inst.instruction |= inst.operands[1].imm;
9719 encode_arm_cp_address (1, TRUE, FALSE, 0);
9723 do_iwmmxt_wshufh (void)
9725 inst.instruction |= inst.operands[0].reg << 12;
9726 inst.instruction |= inst.operands[1].reg << 16;
9727 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9728 inst.instruction |= (inst.operands[2].imm & 0x0f);
9732 do_iwmmxt_wzero (void)
9734 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9735 inst.instruction |= inst.operands[0].reg;
9736 inst.instruction |= inst.operands[0].reg << 12;
9737 inst.instruction |= inst.operands[0].reg << 16;
/* NOTE(review): incomplete extract.  Immediate-shift forms require
   iWMMXt2; a #0 shift is rewritten per element size (h -> wrorh #16,
   w -> wrorw #32, d -> wor wrd,wrn,wrn) by patching the opcode field
   in bits 20-23, then the 5-bit amount is split across bits 8 and 0-3
   with the unconditional condition forced.  */
9741 do_iwmmxt_wrwrwr_or_imm5 (void)
9743 if (inst.operands[2].isreg)
9746 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9747 _("immediate operand requires iWMMXt2"));
9749 if (inst.operands[2].imm == 0)
9751 switch ((inst.instruction >> 20) & 0xf)
9757 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
9758 inst.operands[2].imm = 16;
9759 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9765 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
9766 inst.operands[2].imm = 32;
9767 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9774 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
9776 wrn = (inst.instruction >> 16) & 0xf;
9777 inst.instruction &= 0xff0fff0f;
9778 inst.instruction |= wrn;
9779 /* Bail out here; the instruction is now assembled. */
9784 /* Map 32 -> 0, etc. */
9785 inst.operands[2].imm &= 0x1f;
9786 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
/* NOTE(review): incomplete extract.  Cirrus Maverick: triple/quad
   register packers, the cfmvsc32 DSPSC move, and the shift-immediate
   encoder which places immediate bits 0-3 at insn bits 0-3 and bits
   4-6 at insn bits 5-7 (bit 4 kept zero).  XScale: MIA (acc in bits
   5-7 per the quad layout here, Rm/Rs in 0-3/12-15), MAR and MRA
   accumulator moves, the latter rejecting RdLo==RdHi.  */
9790 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9791 operations first, then control, shift, and load/store. */
9793 /* Insns like "foo X,Y,Z". */
9796 do_mav_triple (void)
9798 inst.instruction |= inst.operands[0].reg << 16;
9799 inst.instruction |= inst.operands[1].reg;
9800 inst.instruction |= inst.operands[2].reg << 12;
9803 /* Insns like "foo W,X,Y,Z".
9804 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9809 inst.instruction |= inst.operands[0].reg << 5;
9810 inst.instruction |= inst.operands[1].reg << 12;
9811 inst.instruction |= inst.operands[2].reg << 16;
9812 inst.instruction |= inst.operands[3].reg;
9815 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
9819 inst.instruction |= inst.operands[1].reg << 12;
9822 /* Maverick shift immediate instructions.
9823 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9824 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9829 int imm = inst.operands[2].imm;
9831 inst.instruction |= inst.operands[0].reg << 12;
9832 inst.instruction |= inst.operands[1].reg << 16;
9834 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9835 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9836 Bit 4 should be 0. */
9837 imm = (imm & 0xf) | ((imm & 0x70) << 1);
9839 inst.instruction |= imm;
9842 /* XScale instructions. Also sorted arithmetic before move. */
9844 /* Xscale multiply-accumulate (argument parse)
9847 MIAxycc acc0,Rm,Rs. */
9852 inst.instruction |= inst.operands[1].reg;
9853 inst.instruction |= inst.operands[2].reg << 12;
9856 /* Xscale move-accumulator-register (argument parse)
9858 MARcc acc0,RdLo,RdHi. */
9863 inst.instruction |= inst.operands[1].reg << 12;
9864 inst.instruction |= inst.operands[2].reg << 16;
9867 /* Xscale move-register-accumulator (argument parse)
9869 MRAcc RdLo,RdHi,acc0. */
9874 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9875 inst.instruction |= inst.operands[0].reg << 12;
9876 inst.instruction |= inst.operands[1].reg << 16;
/* NOTE(review): incomplete extract.  Thumb32 shifted-register operand:
   register shifts are rejected; RRX encodes as ROR with amount 0;
   constant shifts are range-checked (LSL/ROR may not shift by 32) and
   the amount is split into imm3 (bits 12-14) and imm2 (bits 6-7) with
   the shift type in bits 4-5.  */
9879 /* Encoding functions relevant only to Thumb. */
9881 /* inst.operands[i] is a shifted-register operand; encode
9882 it into inst.instruction in the format used by Thumb32. */
9885 encode_thumb32_shifted_operand (int i)
9887 unsigned int value = inst.reloc.exp.X_add_number;
9888 unsigned int shift = inst.operands[i].shift_kind;
9890 constraint (inst.operands[i].immisreg,
9891 _("shift by register not allowed in thumb mode"));
9892 inst.instruction |= inst.operands[i].reg;
9893 if (shift == SHIFT_RRX)
9894 inst.instruction |= SHIFT_ROR << 4;
9897 constraint (inst.reloc.exp.X_op != O_constant,
9898 _("expression too complex"));
9900 constraint (value > 32
9901 || (value == 32 && (shift == SHIFT_LSL
9902 || shift == SHIFT_ROR)),
9903 _("shift expression is too large"));
9907 else if (value == 32)
9910 inst.instruction |= shift << 4;
9911 inst.instruction |= (value & 0x1c) << 10;
9912 inst.instruction |= (value & 0x03) << 6;
/* NOTE(review): incomplete extract.  Thumb32 load/store address
   encoder.  Three cases on the parsed operand: register index (rejects
   PC base, T/D forms, negative index, post-index and writeback; only
   LSL #0-3 is allowed as an index shift), immediate pre-index
   (positive offsets use the 12-bit U=1 form 0x01000000, writeback or
   other modes use the 8-bit form 0x00000c00, both completed by a
   BFD_RELOC_ARM_T32_OFFSET_IMM fixup), and post-index (always
   writeback, 8-bit form 0x00000900).  Unindexed addressing is an
   error for non-coprocessor instructions.  */
9917 /* inst.operands[i] was set up by parse_address. Encode it into a
9918 Thumb32 format load or store instruction. Reject forms that cannot
9919 be used with such instructions. If is_t is true, reject forms that
9920 cannot be used with a T instruction; if is_d is true, reject forms
9921 that cannot be used with a D instruction. If it is a store insn,
9925 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9927 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
9929 constraint (!inst.operands[i].isreg,
9930 _("Instruction does not support =N addresses"));
9932 inst.instruction |= inst.operands[i].reg << 16;
9933 if (inst.operands[i].immisreg)
9935 constraint (is_pc, BAD_PC_ADDRESSING);
9936 constraint (is_t || is_d, _("cannot use register index with this instruction"));
9937 constraint (inst.operands[i].negative,
9938 _("Thumb does not support negative register indexing"));
9939 constraint (inst.operands[i].postind,
9940 _("Thumb does not support register post-indexing"));
9941 constraint (inst.operands[i].writeback,
9942 _("Thumb does not support register indexing with writeback"));
9943 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
9944 _("Thumb supports only LSL in shifted register indexing"));
9946 inst.instruction |= inst.operands[i].imm;
9947 if (inst.operands[i].shifted)
9949 constraint (inst.reloc.exp.X_op != O_constant,
9950 _("expression too complex"));
9951 constraint (inst.reloc.exp.X_add_number < 0
9952 || inst.reloc.exp.X_add_number > 3,
9953 _("shift out of range"));
9954 inst.instruction |= inst.reloc.exp.X_add_number << 4;
9956 inst.reloc.type = BFD_RELOC_UNUSED;
9958 else if (inst.operands[i].preind)
9960 constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
9961 constraint (is_t && inst.operands[i].writeback,
9962 _("cannot use writeback with this instruction"));
9963 constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
9968 inst.instruction |= 0x01000000;
9969 if (inst.operands[i].writeback)
9970 inst.instruction |= 0x00200000;
9974 inst.instruction |= 0x00000c00;
9975 if (inst.operands[i].writeback)
9976 inst.instruction |= 0x00000100;
9978 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9980 else if (inst.operands[i].postind)
9982 gas_assert (inst.operands[i].writeback);
9983 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
9984 constraint (is_t, _("cannot use post-indexing with this instruction"));
9987 inst.instruction |= 0x00200000;
9989 inst.instruction |= 0x00000900;
9990 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9992 else /* unindexed - only for coprocessor */
9993 inst.error = _("instruction does not accept unindexed addressing");
/* NOTE(review): incomplete extract.  X-macro table mapping each
   mnemonic to its 16-bit and 32-bit Thumb encodings (hex opcodes).
   It is expanded twice: first with X yielding enum constant names
   (offset by 0xF7FF so stray enum values decode as undefined 32-bit
   prefixes), then -- starting at the final #define, whose use runs
   past this chunk -- with X yielding the 16-bit opcode values.  */
9996 /* Table of Thumb instructions which exist in both 16- and 32-bit
9997 encodings (the latter only in post-V6T2 cores). The index is the
9998 value used in the insns table below. When there is more than one
9999 possible 16-bit encoding for the instruction, this table always
10001 Also contains several pseudo-instructions used during relaxation. */
10002 #define T16_32_TAB \
10003 X(_adc, 4140, eb400000), \
10004 X(_adcs, 4140, eb500000), \
10005 X(_add, 1c00, eb000000), \
10006 X(_adds, 1c00, eb100000), \
10007 X(_addi, 0000, f1000000), \
10008 X(_addis, 0000, f1100000), \
10009 X(_add_pc,000f, f20f0000), \
10010 X(_add_sp,000d, f10d0000), \
10011 X(_adr, 000f, f20f0000), \
10012 X(_and, 4000, ea000000), \
10013 X(_ands, 4000, ea100000), \
10014 X(_asr, 1000, fa40f000), \
10015 X(_asrs, 1000, fa50f000), \
10016 X(_b, e000, f000b000), \
10017 X(_bcond, d000, f0008000), \
10018 X(_bic, 4380, ea200000), \
10019 X(_bics, 4380, ea300000), \
10020 X(_cmn, 42c0, eb100f00), \
10021 X(_cmp, 2800, ebb00f00), \
10022 X(_cpsie, b660, f3af8400), \
10023 X(_cpsid, b670, f3af8600), \
10024 X(_cpy, 4600, ea4f0000), \
10025 X(_dec_sp,80dd, f1ad0d00), \
10026 X(_eor, 4040, ea800000), \
10027 X(_eors, 4040, ea900000), \
10028 X(_inc_sp,00dd, f10d0d00), \
10029 X(_ldmia, c800, e8900000), \
10030 X(_ldr, 6800, f8500000), \
10031 X(_ldrb, 7800, f8100000), \
10032 X(_ldrh, 8800, f8300000), \
10033 X(_ldrsb, 5600, f9100000), \
10034 X(_ldrsh, 5e00, f9300000), \
10035 X(_ldr_pc,4800, f85f0000), \
10036 X(_ldr_pc2,4800, f85f0000), \
10037 X(_ldr_sp,9800, f85d0000), \
10038 X(_lsl, 0000, fa00f000), \
10039 X(_lsls, 0000, fa10f000), \
10040 X(_lsr, 0800, fa20f000), \
10041 X(_lsrs, 0800, fa30f000), \
10042 X(_mov, 2000, ea4f0000), \
10043 X(_movs, 2000, ea5f0000), \
10044 X(_mul, 4340, fb00f000), \
10045 X(_muls, 4340, ffffffff), /* no 32b muls */ \
10046 X(_mvn, 43c0, ea6f0000), \
10047 X(_mvns, 43c0, ea7f0000), \
10048 X(_neg, 4240, f1c00000), /* rsb #0 */ \
10049 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
10050 X(_orr, 4300, ea400000), \
10051 X(_orrs, 4300, ea500000), \
10052 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
10053 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
10054 X(_rev, ba00, fa90f080), \
10055 X(_rev16, ba40, fa90f090), \
10056 X(_revsh, bac0, fa90f0b0), \
10057 X(_ror, 41c0, fa60f000), \
10058 X(_rors, 41c0, fa70f000), \
10059 X(_sbc, 4180, eb600000), \
10060 X(_sbcs, 4180, eb700000), \
10061 X(_stmia, c000, e8800000), \
10062 X(_str, 6000, f8400000), \
10063 X(_strb, 7000, f8000000), \
10064 X(_strh, 8000, f8200000), \
10065 X(_str_sp,9000, f84d0000), \
10066 X(_sub, 1e00, eba00000), \
10067 X(_subs, 1e00, ebb00000), \
10068 X(_subi, 8000, f1a00000), \
10069 X(_subis, 8000, f1b00000), \
10070 X(_sxtb, b240, fa4ff080), \
10071 X(_sxth, b200, fa0ff080), \
10072 X(_tst, 4200, ea100f00), \
10073 X(_uxtb, b2c0, fa5ff080), \
10074 X(_uxth, b280, fa1ff080), \
10075 X(_nop, bf00, f3af8000), \
10076 X(_yield, bf10, f3af8001), \
10077 X(_wfe, bf20, f3af8002), \
10078 X(_wfi, bf30, f3af8003), \
10079 X(_sev, bf40, f3af8004), \
10080 X(_sevl, bf50, f3af8005), \
10081 X(_udf, de00, f7f0a000)
10083 /* To catch errors in encoding functions, the codes are all offset by
10084 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10085 as 16-bit instructions. */
10086 #define X(a,b,c) T_MNEM##a
10087 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
10090 #define X(a,b,c) 0x##b
10091 static const unsigned short thumb_op16[] = { T16_32_TAB };
10092 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10095 #define X(a,b,c) 0x##c
10096 static const unsigned int thumb_op32[] = { T16_32_TAB };
10097 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10098 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
10102 /* Thumb instruction encoders, in alphabetical order. */
10104 /* ADDW or SUBW. */
10107 do_t_add_sub_w (void)
10111 Rd = inst.operands[0].reg;
10112 Rn = inst.operands[1].reg;
10114 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10115 is the SP-{plus,minus}-immediate form of the instruction. */
10117 constraint (Rd == REG_PC, BAD_PC);
10119 reject_bad_reg (Rd);
10121 inst.instruction |= (Rn << 16) | (Rd << 8);
10122 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10125 /* Parse an add or subtract instruction. We get here with inst.instruction
10126 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10129 do_t_add_sub (void)
10133 Rd = inst.operands[0].reg;
10134 Rs = (inst.operands[1].present
10135 ? inst.operands[1].reg /* Rd, Rs, foo */
10136 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10139 set_it_insn_type_last ();
10141 if (unified_syntax)
10144 bfd_boolean narrow;
10147 flags = (inst.instruction == T_MNEM_adds
10148 || inst.instruction == T_MNEM_subs);
10150 narrow = !in_it_block ();
10152 narrow = in_it_block ();
10153 if (!inst.operands[2].isreg)
10157 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10159 add = (inst.instruction == T_MNEM_add
10160 || inst.instruction == T_MNEM_adds);
10162 if (inst.size_req != 4)
10164 /* Attempt to use a narrow opcode, with relaxation if
10166 if (Rd == REG_SP && Rs == REG_SP && !flags)
10167 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
10168 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
10169 opcode = T_MNEM_add_sp;
10170 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
10171 opcode = T_MNEM_add_pc;
10172 else if (Rd <= 7 && Rs <= 7 && narrow)
10175 opcode = add ? T_MNEM_addis : T_MNEM_subis;
10177 opcode = add ? T_MNEM_addi : T_MNEM_subi;
10181 inst.instruction = THUMB_OP16(opcode);
10182 inst.instruction |= (Rd << 4) | Rs;
10183 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10184 if (inst.size_req != 2)
10185 inst.relax = opcode;
10188 constraint (inst.size_req == 2, BAD_HIREG);
10190 if (inst.size_req == 4
10191 || (inst.size_req != 2 && !opcode))
10195 constraint (add, BAD_PC);
10196 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
10197 _("only SUBS PC, LR, #const allowed"));
10198 constraint (inst.reloc.exp.X_op != O_constant,
10199 _("expression too complex"));
10200 constraint (inst.reloc.exp.X_add_number < 0
10201 || inst.reloc.exp.X_add_number > 0xff,
10202 _("immediate value out of range"));
10203 inst.instruction = T2_SUBS_PC_LR
10204 | inst.reloc.exp.X_add_number;
10205 inst.reloc.type = BFD_RELOC_UNUSED;
10208 else if (Rs == REG_PC)
10210 /* Always use addw/subw. */
10211 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
10212 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10216 inst.instruction = THUMB_OP32 (inst.instruction);
10217 inst.instruction = (inst.instruction & 0xe1ffffff)
10220 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10222 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
10224 inst.instruction |= Rd << 8;
10225 inst.instruction |= Rs << 16;
10230 unsigned int value = inst.reloc.exp.X_add_number;
10231 unsigned int shift = inst.operands[2].shift_kind;
10233 Rn = inst.operands[2].reg;
10234 /* See if we can do this with a 16-bit instruction. */
10235 if (!inst.operands[2].shifted && inst.size_req != 4)
10237 if (Rd > 7 || Rs > 7 || Rn > 7)
10242 inst.instruction = ((inst.instruction == T_MNEM_adds
10243 || inst.instruction == T_MNEM_add)
10245 : T_OPCODE_SUB_R3);
10246 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10250 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
10252 /* Thumb-1 cores (except v6-M) require at least one high
10253 register in a narrow non flag setting add. */
10254 if (Rd > 7 || Rn > 7
10255 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
10256 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
10263 inst.instruction = T_OPCODE_ADD_HI;
10264 inst.instruction |= (Rd & 8) << 4;
10265 inst.instruction |= (Rd & 7);
10266 inst.instruction |= Rn << 3;
10272 constraint (Rd == REG_PC, BAD_PC);
10273 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10274 constraint (Rs == REG_PC, BAD_PC);
10275 reject_bad_reg (Rn);
10277 /* If we get here, it can't be done in 16 bits. */
10278 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
10279 _("shift must be constant"));
10280 inst.instruction = THUMB_OP32 (inst.instruction);
10281 inst.instruction |= Rd << 8;
10282 inst.instruction |= Rs << 16;
10283 constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
10284 _("shift value over 3 not allowed in thumb mode"));
10285 constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
10286 _("only LSL shift allowed in thumb mode"));
10287 encode_thumb32_shifted_operand (2);
10292 constraint (inst.instruction == T_MNEM_adds
10293 || inst.instruction == T_MNEM_subs,
10296 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
10298 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
10299 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
10302 inst.instruction = (inst.instruction == T_MNEM_add
10303 ? 0x0000 : 0x8000);
10304 inst.instruction |= (Rd << 4) | Rs;
10305 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10309 Rn = inst.operands[2].reg;
10310 constraint (inst.operands[2].shifted, _("unshifted register required"));
10312 /* We now have Rd, Rs, and Rn set to registers. */
10313 if (Rd > 7 || Rs > 7 || Rn > 7)
10315 /* Can't do this for SUB. */
10316 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
10317 inst.instruction = T_OPCODE_ADD_HI;
10318 inst.instruction |= (Rd & 8) << 4;
10319 inst.instruction |= (Rd & 7);
10321 inst.instruction |= Rn << 3;
10323 inst.instruction |= Rs << 3;
10325 constraint (1, _("dest must overlap one source register"));
10329 inst.instruction = (inst.instruction == T_MNEM_add
10330 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
10331 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10341 Rd = inst.operands[0].reg;
10342 reject_bad_reg (Rd);
10344 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10346 /* Defer to section relaxation. */
10347 inst.relax = inst.instruction;
10348 inst.instruction = THUMB_OP16 (inst.instruction);
10349 inst.instruction |= Rd << 4;
10351 else if (unified_syntax && inst.size_req != 2)
10353 /* Generate a 32-bit opcode. */
10354 inst.instruction = THUMB_OP32 (inst.instruction);
10355 inst.instruction |= Rd << 8;
10356 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10357 inst.reloc.pc_rel = 1;
10361 /* Generate a 16-bit opcode. */
10362 inst.instruction = THUMB_OP16 (inst.instruction);
10363 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10364 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10365 inst.reloc.pc_rel = 1;
10367 inst.instruction |= Rd << 4;
10371 /* Arithmetic instructions for which there is just one 16-bit
10372 instruction encoding, and it allows only two low registers.
10373 For maximal compatibility with ARM syntax, we allow three register
10374 operands even when Thumb-32 instructions are not available, as long
10375 as the first two are identical. For instance, both "sbc r0,r1" and
10376 "sbc r0,r0,r1" are allowed. */
10382 Rd = inst.operands[0].reg;
10383 Rs = (inst.operands[1].present
10384 ? inst.operands[1].reg /* Rd, Rs, foo */
10385 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10386 Rn = inst.operands[2].reg;
10388 reject_bad_reg (Rd);
10389 reject_bad_reg (Rs);
10390 if (inst.operands[2].isreg)
10391 reject_bad_reg (Rn);
10393 if (unified_syntax)
10395 if (!inst.operands[2].isreg)
10397 /* For an immediate, we always generate a 32-bit opcode;
10398 section relaxation will shrink it later if possible. */
10399 inst.instruction = THUMB_OP32 (inst.instruction);
10400 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10401 inst.instruction |= Rd << 8;
10402 inst.instruction |= Rs << 16;
10403 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10407 bfd_boolean narrow;
10409 /* See if we can do this with a 16-bit instruction. */
10410 if (THUMB_SETS_FLAGS (inst.instruction))
10411 narrow = !in_it_block ();
10413 narrow = in_it_block ();
10415 if (Rd > 7 || Rn > 7 || Rs > 7)
10417 if (inst.operands[2].shifted)
10419 if (inst.size_req == 4)
10425 inst.instruction = THUMB_OP16 (inst.instruction);
10426 inst.instruction |= Rd;
10427 inst.instruction |= Rn << 3;
10431 /* If we get here, it can't be done in 16 bits. */
10432 constraint (inst.operands[2].shifted
10433 && inst.operands[2].immisreg,
10434 _("shift must be constant"));
10435 inst.instruction = THUMB_OP32 (inst.instruction);
10436 inst.instruction |= Rd << 8;
10437 inst.instruction |= Rs << 16;
10438 encode_thumb32_shifted_operand (2);
10443 /* On its face this is a lie - the instruction does set the
10444 flags. However, the only supported mnemonic in this mode
10445 says it doesn't. */
10446 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10448 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10449 _("unshifted register required"));
10450 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10451 constraint (Rd != Rs,
10452 _("dest and source1 must be the same register"));
10454 inst.instruction = THUMB_OP16 (inst.instruction);
10455 inst.instruction |= Rd;
10456 inst.instruction |= Rn << 3;
10460 /* Similarly, but for instructions where the arithmetic operation is
10461 commutative, so we can allow either of them to be different from
10462 the destination operand in a 16-bit instruction. For instance, all
10463 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10470 Rd = inst.operands[0].reg;
10471 Rs = (inst.operands[1].present
10472 ? inst.operands[1].reg /* Rd, Rs, foo */
10473 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10474 Rn = inst.operands[2].reg;
10476 reject_bad_reg (Rd);
10477 reject_bad_reg (Rs);
10478 if (inst.operands[2].isreg)
10479 reject_bad_reg (Rn);
10481 if (unified_syntax)
10483 if (!inst.operands[2].isreg)
10485 /* For an immediate, we always generate a 32-bit opcode;
10486 section relaxation will shrink it later if possible. */
10487 inst.instruction = THUMB_OP32 (inst.instruction);
10488 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10489 inst.instruction |= Rd << 8;
10490 inst.instruction |= Rs << 16;
10491 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10495 bfd_boolean narrow;
10497 /* See if we can do this with a 16-bit instruction. */
10498 if (THUMB_SETS_FLAGS (inst.instruction))
10499 narrow = !in_it_block ();
10501 narrow = in_it_block ();
10503 if (Rd > 7 || Rn > 7 || Rs > 7)
10505 if (inst.operands[2].shifted)
10507 if (inst.size_req == 4)
10514 inst.instruction = THUMB_OP16 (inst.instruction);
10515 inst.instruction |= Rd;
10516 inst.instruction |= Rn << 3;
10521 inst.instruction = THUMB_OP16 (inst.instruction);
10522 inst.instruction |= Rd;
10523 inst.instruction |= Rs << 3;
10528 /* If we get here, it can't be done in 16 bits. */
10529 constraint (inst.operands[2].shifted
10530 && inst.operands[2].immisreg,
10531 _("shift must be constant"));
10532 inst.instruction = THUMB_OP32 (inst.instruction);
10533 inst.instruction |= Rd << 8;
10534 inst.instruction |= Rs << 16;
10535 encode_thumb32_shifted_operand (2);
10540 /* On its face this is a lie - the instruction does set the
10541 flags. However, the only supported mnemonic in this mode
10542 says it doesn't. */
10543 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10545 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10546 _("unshifted register required"));
10547 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10549 inst.instruction = THUMB_OP16 (inst.instruction);
10550 inst.instruction |= Rd;
10553 inst.instruction |= Rn << 3;
10555 inst.instruction |= Rs << 3;
10557 constraint (1, _("dest must overlap one source register"));
10565 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10566 constraint (msb > 32, _("bit-field extends past end of register"));
10567 /* The instruction encoding stores the LSB and MSB,
10568 not the LSB and width. */
10569 Rd = inst.operands[0].reg;
10570 reject_bad_reg (Rd);
10571 inst.instruction |= Rd << 8;
10572 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10573 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10574 inst.instruction |= msb - 1;
10583 Rd = inst.operands[0].reg;
10584 reject_bad_reg (Rd);
10586 /* #0 in second position is alternative syntax for bfc, which is
10587 the same instruction but with REG_PC in the Rm field. */
10588 if (!inst.operands[1].isreg)
10592 Rn = inst.operands[1].reg;
10593 reject_bad_reg (Rn);
10596 msb = inst.operands[2].imm + inst.operands[3].imm;
10597 constraint (msb > 32, _("bit-field extends past end of register"));
10598 /* The instruction encoding stores the LSB and MSB,
10599 not the LSB and width. */
10600 inst.instruction |= Rd << 8;
10601 inst.instruction |= Rn << 16;
10602 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10603 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10604 inst.instruction |= msb - 1;
10612 Rd = inst.operands[0].reg;
10613 Rn = inst.operands[1].reg;
10615 reject_bad_reg (Rd);
10616 reject_bad_reg (Rn);
10618 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10619 _("bit-field extends past end of register"));
10620 inst.instruction |= Rd << 8;
10621 inst.instruction |= Rn << 16;
10622 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10623 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10624 inst.instruction |= inst.operands[3].imm - 1;
10627 /* ARM V5 Thumb BLX (argument parse)
10628 BLX <target_addr> which is BLX(1)
10629 BLX <Rm> which is BLX(2)
10630 Unfortunately, there are two different opcodes for this mnemonic.
10631 So, the insns[].value is not used, and the code here zaps values
10632 into inst.instruction.
10634 ??? How to take advantage of the additional two bits of displacement
10635 available in Thumb32 mode? Need new relocation? */
10640 set_it_insn_type_last ();
10642 if (inst.operands[0].isreg)
10644 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10645 /* We have a register, so this is BLX(2). */
10646 inst.instruction |= inst.operands[0].reg << 3;
10650 /* No register. This must be BLX(1). */
10651 inst.instruction = 0xf000e800;
10652 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10664 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10666 if (in_it_block ())
10668 /* Conditional branches inside IT blocks are encoded as unconditional
10670 cond = COND_ALWAYS;
10675 if (cond != COND_ALWAYS)
10676 opcode = T_MNEM_bcond;
10678 opcode = inst.instruction;
10681 && (inst.size_req == 4
10682 || (inst.size_req != 2
10683 && (inst.operands[0].hasreloc
10684 || inst.reloc.exp.X_op == O_constant))))
10686 inst.instruction = THUMB_OP32(opcode);
10687 if (cond == COND_ALWAYS)
10688 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10691 gas_assert (cond != 0xF);
10692 inst.instruction |= cond << 22;
10693 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10698 inst.instruction = THUMB_OP16(opcode);
10699 if (cond == COND_ALWAYS)
10700 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10703 inst.instruction |= cond << 8;
10704 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10706 /* Allow section relaxation. */
10707 if (unified_syntax && inst.size_req != 2)
10708 inst.relax = opcode;
10710 inst.reloc.type = reloc;
10711 inst.reloc.pc_rel = 1;
10714 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10715 between the two is the maximum immediate allowed - which is passed in
10718 do_t_bkpt_hlt1 (int range)
10720 constraint (inst.cond != COND_ALWAYS,
10721 _("instruction is always unconditional"));
10722 if (inst.operands[0].present)
10724 constraint (inst.operands[0].imm > range,
10725 _("immediate value out of range"));
10726 inst.instruction |= inst.operands[0].imm;
10729 set_it_insn_type (NEUTRAL_IT_INSN);
10735 do_t_bkpt_hlt1 (63);
10741 do_t_bkpt_hlt1 (255);
10745 do_t_branch23 (void)
10747 set_it_insn_type_last ();
10748 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10750 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10751 this file. We used to simply ignore the PLT reloc type here --
10752 the branch encoding is now needed to deal with TLSCALL relocs.
10753 So if we see a PLT reloc now, put it back to how it used to be to
10754 keep the preexisting behaviour. */
10755 if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10756 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10758 #if defined(OBJ_COFF)
10759 /* If the destination of the branch is a defined symbol which does not have
10760 the THUMB_FUNC attribute, then we must be calling a function which has
10761 the (interfacearm) attribute. We look for the Thumb entry point to that
10762 function and change the branch to refer to that function instead. */
10763 if ( inst.reloc.exp.X_op == O_symbol
10764 && inst.reloc.exp.X_add_symbol != NULL
10765 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10766 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10767 inst.reloc.exp.X_add_symbol =
10768 find_real_start (inst.reloc.exp.X_add_symbol);
10775 set_it_insn_type_last ();
10776 inst.instruction |= inst.operands[0].reg << 3;
10777 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
10778 should cause the alignment to be checked once it is known. This is
10779 because BX PC only works if the instruction is word aligned. */
10787 set_it_insn_type_last ();
10788 Rm = inst.operands[0].reg;
10789 reject_bad_reg (Rm);
10790 inst.instruction |= Rm << 16;
10799 Rd = inst.operands[0].reg;
10800 Rm = inst.operands[1].reg;
10802 reject_bad_reg (Rd);
10803 reject_bad_reg (Rm);
10805 inst.instruction |= Rd << 8;
10806 inst.instruction |= Rm << 16;
10807 inst.instruction |= Rm;
10813 set_it_insn_type (OUTSIDE_IT_INSN);
10814 inst.instruction |= inst.operands[0].imm;
10820 set_it_insn_type (OUTSIDE_IT_INSN);
10822 && (inst.operands[1].present || inst.size_req == 4)
10823 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
10825 unsigned int imod = (inst.instruction & 0x0030) >> 4;
10826 inst.instruction = 0xf3af8000;
10827 inst.instruction |= imod << 9;
10828 inst.instruction |= inst.operands[0].imm << 5;
10829 if (inst.operands[1].present)
10830 inst.instruction |= 0x100 | inst.operands[1].imm;
10834 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
10835 && (inst.operands[0].imm & 4),
10836 _("selected processor does not support 'A' form "
10837 "of this instruction"));
10838 constraint (inst.operands[1].present || inst.size_req == 4,
10839 _("Thumb does not support the 2-argument "
10840 "form of this instruction"));
10841 inst.instruction |= inst.operands[0].imm;
10845 /* THUMB CPY instruction (argument parse). */
10850 if (inst.size_req == 4)
10852 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10853 inst.instruction |= inst.operands[0].reg << 8;
10854 inst.instruction |= inst.operands[1].reg;
10858 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10859 inst.instruction |= (inst.operands[0].reg & 0x7);
10860 inst.instruction |= inst.operands[1].reg << 3;
10867 set_it_insn_type (OUTSIDE_IT_INSN);
10868 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10869 inst.instruction |= inst.operands[0].reg;
10870 inst.reloc.pc_rel = 1;
10871 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10877 inst.instruction |= inst.operands[0].imm;
10883 unsigned Rd, Rn, Rm;
10885 Rd = inst.operands[0].reg;
10886 Rn = (inst.operands[1].present
10887 ? inst.operands[1].reg : Rd);
10888 Rm = inst.operands[2].reg;
10890 reject_bad_reg (Rd);
10891 reject_bad_reg (Rn);
10892 reject_bad_reg (Rm);
10894 inst.instruction |= Rd << 8;
10895 inst.instruction |= Rn << 16;
10896 inst.instruction |= Rm;
10902 if (unified_syntax && inst.size_req == 4)
10903 inst.instruction = THUMB_OP32 (inst.instruction);
10905 inst.instruction = THUMB_OP16 (inst.instruction);
10911 unsigned int cond = inst.operands[0].imm;
10913 set_it_insn_type (IT_INSN);
10914 now_it.mask = (inst.instruction & 0xf) | 0x10;
10916 now_it.warn_deprecated = FALSE;
10918 /* If the condition is a negative condition, invert the mask. */
10919 if ((cond & 0x1) == 0x0)
10921 unsigned int mask = inst.instruction & 0x000f;
10923 if ((mask & 0x7) == 0)
10925 /* No conversion needed. */
10926 now_it.block_length = 1;
10928 else if ((mask & 0x3) == 0)
10931 now_it.block_length = 2;
10933 else if ((mask & 0x1) == 0)
10936 now_it.block_length = 3;
10941 now_it.block_length = 4;
10944 inst.instruction &= 0xfff0;
10945 inst.instruction |= mask;
10948 inst.instruction |= cond << 4;
10951 /* Helper function used for both push/pop and ldm/stm. */
10953 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
10957 load = (inst.instruction & (1 << 20)) != 0;
10959 if (mask & (1 << 13))
10960 inst.error = _("SP not allowed in register list");
10962 if ((mask & (1 << base)) != 0
10964 inst.error = _("having the base register in the register list when "
10965 "using write back is UNPREDICTABLE");
10969 if (mask & (1 << 15))
10971 if (mask & (1 << 14))
10972 inst.error = _("LR and PC should not both be in register list");
10974 set_it_insn_type_last ();
10979 if (mask & (1 << 15))
10980 inst.error = _("PC not allowed in register list");
10983 if ((mask & (mask - 1)) == 0)
10985 /* Single register transfers implemented as str/ldr. */
10988 if (inst.instruction & (1 << 23))
10989 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
10991 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
10995 if (inst.instruction & (1 << 23))
10996 inst.instruction = 0x00800000; /* ia -> [base] */
10998 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
11001 inst.instruction |= 0xf8400000;
11003 inst.instruction |= 0x00100000;
11005 mask = ffs (mask) - 1;
11008 else if (writeback)
11009 inst.instruction |= WRITE_BACK;
11011 inst.instruction |= mask;
11012 inst.instruction |= base << 16;
11018 /* This really doesn't seem worth it. */
11019 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11020 _("expression too complex"));
11021 constraint (inst.operands[1].writeback,
11022 _("Thumb load/store multiple does not support {reglist}^"));
11024 if (unified_syntax)
11026 bfd_boolean narrow;
11030 /* See if we can use a 16-bit instruction. */
11031 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
11032 && inst.size_req != 4
11033 && !(inst.operands[1].imm & ~0xff))
11035 mask = 1 << inst.operands[0].reg;
11037 if (inst.operands[0].reg <= 7)
11039 if (inst.instruction == T_MNEM_stmia
11040 ? inst.operands[0].writeback
11041 : (inst.operands[0].writeback
11042 == !(inst.operands[1].imm & mask)))
11044 if (inst.instruction == T_MNEM_stmia
11045 && (inst.operands[1].imm & mask)
11046 && (inst.operands[1].imm & (mask - 1)))
11047 as_warn (_("value stored for r%d is UNKNOWN"),
11048 inst.operands[0].reg);
11050 inst.instruction = THUMB_OP16 (inst.instruction);
11051 inst.instruction |= inst.operands[0].reg << 8;
11052 inst.instruction |= inst.operands[1].imm;
11055 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11057 /* This means 1 register in reg list one of 3 situations:
11058 1. Instruction is stmia, but without writeback.
11059 2. lmdia without writeback, but with Rn not in
11061 3. ldmia with writeback, but with Rn in reglist.
11062 Case 3 is UNPREDICTABLE behaviour, so we handle
11063 case 1 and 2 which can be converted into a 16-bit
11064 str or ldr. The SP cases are handled below. */
11065 unsigned long opcode;
11066 /* First, record an error for Case 3. */
11067 if (inst.operands[1].imm & mask
11068 && inst.operands[0].writeback)
11070 _("having the base register in the register list when "
11071 "using write back is UNPREDICTABLE");
11073 opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
11075 inst.instruction = THUMB_OP16 (opcode);
11076 inst.instruction |= inst.operands[0].reg << 3;
11077 inst.instruction |= (ffs (inst.operands[1].imm)-1);
11081 else if (inst.operands[0] .reg == REG_SP)
11083 if (inst.operands[0].writeback)
11086 THUMB_OP16 (inst.instruction == T_MNEM_stmia
11087 ? T_MNEM_push : T_MNEM_pop);
11088 inst.instruction |= inst.operands[1].imm;
11091 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11094 THUMB_OP16 (inst.instruction == T_MNEM_stmia
11095 ? T_MNEM_str_sp : T_MNEM_ldr_sp);
11096 inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
11104 if (inst.instruction < 0xffff)
11105 inst.instruction = THUMB_OP32 (inst.instruction);
11107 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
11108 inst.operands[0].writeback);
11113 constraint (inst.operands[0].reg > 7
11114 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
11115 constraint (inst.instruction != T_MNEM_ldmia
11116 && inst.instruction != T_MNEM_stmia,
11117 _("Thumb-2 instruction only valid in unified syntax"));
11118 if (inst.instruction == T_MNEM_stmia)
11120 if (!inst.operands[0].writeback)
11121 as_warn (_("this instruction will write back the base register"));
11122 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
11123 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
11124 as_warn (_("value stored for r%d is UNKNOWN"),
11125 inst.operands[0].reg);
11129 if (!inst.operands[0].writeback
11130 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
11131 as_warn (_("this instruction will write back the base register"));
11132 else if (inst.operands[0].writeback
11133 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
11134 as_warn (_("this instruction will not write back the base register"));
11137 inst.instruction = THUMB_OP16 (inst.instruction);
11138 inst.instruction |= inst.operands[0].reg << 8;
11139 inst.instruction |= inst.operands[1].imm;
11146 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11147 || inst.operands[1].postind || inst.operands[1].writeback
11148 || inst.operands[1].immisreg || inst.operands[1].shifted
11149 || inst.operands[1].negative,
11152 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11154 inst.instruction |= inst.operands[0].reg << 12;
11155 inst.instruction |= inst.operands[1].reg << 16;
11156 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11162 if (!inst.operands[1].present)
11164 constraint (inst.operands[0].reg == REG_LR,
11165 _("r14 not allowed as first register "
11166 "when second register is omitted"));
11167 inst.operands[1].reg = inst.operands[0].reg + 1;
11169 constraint (inst.operands[0].reg == inst.operands[1].reg,
11172 inst.instruction |= inst.operands[0].reg << 12;
11173 inst.instruction |= inst.operands[1].reg << 8;
11174 inst.instruction |= inst.operands[2].reg << 16;
11180 unsigned long opcode;
11183 if (inst.operands[0].isreg
11184 && !inst.operands[0].preind
11185 && inst.operands[0].reg == REG_PC)
11186 set_it_insn_type_last ();
11188 opcode = inst.instruction;
11189 if (unified_syntax)
11191 if (!inst.operands[1].isreg)
11193 if (opcode <= 0xffff)
11194 inst.instruction = THUMB_OP32 (opcode);
11195 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11198 if (inst.operands[1].isreg
11199 && !inst.operands[1].writeback
11200 && !inst.operands[1].shifted && !inst.operands[1].postind
11201 && !inst.operands[1].negative && inst.operands[0].reg <= 7
11202 && opcode <= 0xffff
11203 && inst.size_req != 4)
11205 /* Insn may have a 16-bit form. */
11206 Rn = inst.operands[1].reg;
11207 if (inst.operands[1].immisreg)
11209 inst.instruction = THUMB_OP16 (opcode);
11211 if (Rn <= 7 && inst.operands[1].imm <= 7)
11213 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
11214 reject_bad_reg (inst.operands[1].imm);
11216 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
11217 && opcode != T_MNEM_ldrsb)
11218 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
11219 || (Rn == REG_SP && opcode == T_MNEM_str))
11226 if (inst.reloc.pc_rel)
11227 opcode = T_MNEM_ldr_pc2;
11229 opcode = T_MNEM_ldr_pc;
11233 if (opcode == T_MNEM_ldr)
11234 opcode = T_MNEM_ldr_sp;
11236 opcode = T_MNEM_str_sp;
11238 inst.instruction = inst.operands[0].reg << 8;
11242 inst.instruction = inst.operands[0].reg;
11243 inst.instruction |= inst.operands[1].reg << 3;
11245 inst.instruction |= THUMB_OP16 (opcode);
11246 if (inst.size_req == 2)
11247 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11249 inst.relax = opcode;
11253 /* Definitely a 32-bit variant. */
11255 /* Warning for Erratum 752419. */
11256 if (opcode == T_MNEM_ldr
11257 && inst.operands[0].reg == REG_SP
11258 && inst.operands[1].writeback == 1
11259 && !inst.operands[1].immisreg)
11261 if (no_cpu_selected ()
11262 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
11263 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
11264 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
11265 as_warn (_("This instruction may be unpredictable "
11266 "if executed on M-profile cores "
11267 "with interrupts enabled."));
11270 /* Do some validations regarding addressing modes. */
11271 if (inst.operands[1].immisreg)
11272 reject_bad_reg (inst.operands[1].imm);
11274 constraint (inst.operands[1].writeback == 1
11275 && inst.operands[0].reg == inst.operands[1].reg,
11278 inst.instruction = THUMB_OP32 (opcode);
11279 inst.instruction |= inst.operands[0].reg << 12;
11280 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
11281 check_ldr_r15_aligned ();
11285 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11287 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
11289 /* Only [Rn,Rm] is acceptable. */
11290 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
11291 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
11292 || inst.operands[1].postind || inst.operands[1].shifted
11293 || inst.operands[1].negative,
11294 _("Thumb does not support this addressing mode"));
11295 inst.instruction = THUMB_OP16 (inst.instruction);
11299 inst.instruction = THUMB_OP16 (inst.instruction);
11300 if (!inst.operands[1].isreg)
11301 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11304 constraint (!inst.operands[1].preind
11305 || inst.operands[1].shifted
11306 || inst.operands[1].writeback,
11307 _("Thumb does not support this addressing mode"));
11308 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
11310 constraint (inst.instruction & 0x0600,
11311 _("byte or halfword not valid for base register"));
11312 constraint (inst.operands[1].reg == REG_PC
11313 && !(inst.instruction & THUMB_LOAD_BIT),
11314 _("r15 based store not allowed"));
11315 constraint (inst.operands[1].immisreg,
11316 _("invalid base register for register offset"));
11318 if (inst.operands[1].reg == REG_PC)
11319 inst.instruction = T_OPCODE_LDR_PC;
11320 else if (inst.instruction & THUMB_LOAD_BIT)
11321 inst.instruction = T_OPCODE_LDR_SP;
11323 inst.instruction = T_OPCODE_STR_SP;
11325 inst.instruction |= inst.operands[0].reg << 8;
11326 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11330 constraint (inst.operands[1].reg > 7, BAD_HIREG);
11331 if (!inst.operands[1].immisreg)
11333 /* Immediate offset. */
11334 inst.instruction |= inst.operands[0].reg;
11335 inst.instruction |= inst.operands[1].reg << 3;
11336 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11340 /* Register offset. */
11341 constraint (inst.operands[1].imm > 7, BAD_HIREG);
11342 constraint (inst.operands[1].negative,
11343 _("Thumb does not support this addressing mode"));
11346 switch (inst.instruction)
11348 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
11349 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
11350 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
11351 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
11352 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
11353 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
11354 case 0x5600 /* ldrsb */:
11355 case 0x5e00 /* ldrsh */: break;
11359 inst.instruction |= inst.operands[0].reg;
11360 inst.instruction |= inst.operands[1].reg << 3;
11361 inst.instruction |= inst.operands[1].imm << 6;
/* NOTE(review): fragments of do_t_ldstd (ldrd/strd) and, from the
   encode_thumb32_addr_mode(1, is_t=TRUE, ...) call on, what appears to
   be do_t_ldstt — function headers/braces are missing from this
   excerpt; code kept byte-identical.  */
11367 if (!inst.operands[1].present)
/* Second transfer register defaults to Rt + 1.  */
11369 inst.operands[1].reg = inst.operands[0].reg + 1;
11370 constraint (inst.operands[0].reg == REG_LR,
11371 _("r14 not allowed here"));
11372 constraint (inst.operands[0].reg == REG_R12,
11373 _("r12 not allowed here"));
11376 if (inst.operands[2].writeback
11377 && (inst.operands[0].reg == inst.operands[2].reg
11378 || inst.operands[1].reg == inst.operands[2].reg))
11379 as_warn (_("base register written back, and overlaps "
11380 "one of transfer registers"));
11382 inst.instruction |= inst.operands[0].reg << 12;
11383 inst.instruction |= inst.operands[1].reg << 8;
11384 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11390 inst.instruction |= inst.operands[0].reg << 12;
11391 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
/* NOTE(review): bodies of two multiply-accumulate encoders — the
   Rd/Rn/Rm/Ra form (presumably do_t_mla) and the RdLo/RdHi long form
   (presumably do_t_mlal); the function headers are missing from this
   excerpt.  Code kept byte-identical.  */
11397 unsigned Rd, Rn, Rm, Ra;
11399 Rd = inst.operands[0].reg;
11400 Rn = inst.operands[1].reg;
11401 Rm = inst.operands[2].reg;
11402 Ra = inst.operands[3].reg;
/* SP/PC are not valid for any operand of these encodings.  */
11404 reject_bad_reg (Rd);
11405 reject_bad_reg (Rn);
11406 reject_bad_reg (Rm);
11407 reject_bad_reg (Ra);
11409 inst.instruction |= Rd << 8;
11410 inst.instruction |= Rn << 16;
11411 inst.instruction |= Rm;
11412 inst.instruction |= Ra << 12;
11418 unsigned RdLo, RdHi, Rn, Rm;
11420 RdLo = inst.operands[0].reg;
11421 RdHi = inst.operands[1].reg;
11422 Rn = inst.operands[2].reg;
11423 Rm = inst.operands[3].reg;
11425 reject_bad_reg (RdLo);
11426 reject_bad_reg (RdHi);
11427 reject_bad_reg (Rn);
11428 reject_bad_reg (Rm);
11430 inst.instruction |= RdLo << 12;
11431 inst.instruction |= RdHi << 8;
11432 inst.instruction |= Rn << 16;
11433 inst.instruction |= Rm;
/* Encode Thumb MOV/MOVS/CMP.  NOTE(review): this excerpt omits many
   intervening lines (braces, else branches), so the visible control
   flow is incomplete; code kept byte-identical.  */
11437 do_t_mov_cmp (void)
11441 Rn = inst.operands[0].reg;
11442 Rm = inst.operands[1].reg;
/* MOV/CMP update flags or the IT state; must be last in an IT block.  */
11445 set_it_insn_type_last ();
11447 if (unified_syntax)
11449 int r0off = (inst.instruction == T_MNEM_mov
11450 || inst.instruction == T_MNEM_movs) ? 8 : 16;
11451 unsigned long opcode;
11452 bfd_boolean narrow;
11453 bfd_boolean low_regs;
11455 low_regs = (Rn <= 7 && Rm <= 7);
11456 opcode = inst.instruction;
11457 if (in_it_block ())
11458 narrow = opcode != T_MNEM_movs;
11460 narrow = opcode != T_MNEM_movs || low_regs;
11461 if (inst.size_req == 4
11462 || inst.operands[1].shifted)
11465 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11466 if (opcode == T_MNEM_movs && inst.operands[1].isreg
11467 && !inst.operands[1].shifted
11471 inst.instruction = T2_SUBS_PC_LR;
11475 if (opcode == T_MNEM_cmp)
11477 constraint (Rn == REG_PC, BAD_PC);
11480 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11482 warn_deprecated_sp (Rm);
11483 /* R15 was documented as a valid choice for Rm in ARMv6,
11484 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11485 tools reject R15, so we do too. */
11486 constraint (Rm == REG_PC, BAD_PC);
11489 reject_bad_reg (Rm);
11491 else if (opcode == T_MNEM_mov
11492 || opcode == T_MNEM_movs)
11494 if (inst.operands[1].isreg)
11496 if (opcode == T_MNEM_movs)
11498 reject_bad_reg (Rn);
11499 reject_bad_reg (Rm);
11503 /* This is mov.n. */
11504 if ((Rn == REG_SP || Rn == REG_PC)
11505 && (Rm == REG_SP || Rm == REG_PC))
11507 as_warn (_("Use of r%u as a source register is "
11508 "deprecated when r%u is the destination "
11509 "register."), Rm, Rn);
11514 /* This is mov.w. */
11515 constraint (Rn == REG_PC, BAD_PC);
11516 constraint (Rm == REG_PC, BAD_PC);
11517 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11521 reject_bad_reg (Rn);
11524 if (!inst.operands[1].isreg)
11526 /* Immediate operand. */
11527 if (!in_it_block () && opcode == T_MNEM_mov)
11529 if (low_regs && narrow)
11531 inst.instruction = THUMB_OP16 (opcode);
11532 inst.instruction |= Rn << 8;
11533 if (inst.size_req == 2)
11534 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11536 inst.relax = opcode;
11540 inst.instruction = THUMB_OP32 (inst.instruction);
/* Force the immediate (non-shifted-register) 32-bit form.  */
11541 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11542 inst.instruction |= Rn << r0off;
11543 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11546 else if (inst.operands[1].shifted && inst.operands[1].immisreg
11547 && (inst.instruction == T_MNEM_mov
11548 || inst.instruction == T_MNEM_movs))
11550 /* Register shifts are encoded as separate shift instructions. */
11551 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11553 if (in_it_block ())
11558 if (inst.size_req == 4)
11561 if (!low_regs || inst.operands[1].imm > 7)
11567 switch (inst.operands[1].shift_kind)
11570 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11573 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11576 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11579 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11585 inst.instruction = opcode;
11588 inst.instruction |= Rn;
11589 inst.instruction |= inst.operands[1].imm << 3;
11594 inst.instruction |= CONDS_BIT;
11596 inst.instruction |= Rn << 8;
11597 inst.instruction |= Rm << 16;
11598 inst.instruction |= inst.operands[1].imm;
11603 /* Some mov with immediate shift have narrow variants.
11604 Register shifts are handled above. */
11605 if (low_regs && inst.operands[1].shifted
11606 && (inst.instruction == T_MNEM_mov
11607 || inst.instruction == T_MNEM_movs))
11609 if (in_it_block ())
11610 narrow = (inst.instruction == T_MNEM_mov);
11612 narrow = (inst.instruction == T_MNEM_movs);
11617 switch (inst.operands[1].shift_kind)
11619 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11620 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11621 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11622 default: narrow = FALSE; break;
11628 inst.instruction |= Rn;
11629 inst.instruction |= Rm << 3;
11630 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11634 inst.instruction = THUMB_OP32 (inst.instruction);
11635 inst.instruction |= Rn << r0off;
11636 encode_thumb32_shifted_operand (1);
/* Register-to-register forms: pick 16-bit hi/lo-reg encodings.  */
11640 switch (inst.instruction)
11643 /* In v4t or v5t a move of two lowregs produces unpredictable
11644 results. Don't allow this. */
11647 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
11648 "MOV Rd, Rs with two low registers is not "
11649 "permitted on this architecture");
11650 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
11654 inst.instruction = T_OPCODE_MOV_HR;
11655 inst.instruction |= (Rn & 0x8) << 4;
11656 inst.instruction |= (Rn & 0x7);
11657 inst.instruction |= Rm << 3;
11661 /* We know we have low registers at this point.
11662 Generate LSLS Rd, Rs, #0. */
11663 inst.instruction = T_OPCODE_LSL_I;
11664 inst.instruction |= Rn;
11665 inst.instruction |= Rm << 3;
11671 inst.instruction = T_OPCODE_CMP_LR;
11672 inst.instruction |= Rn;
11673 inst.instruction |= Rm << 3;
11677 inst.instruction = T_OPCODE_CMP_HR;
11678 inst.instruction |= (Rn & 0x8) << 4;
11679 inst.instruction |= (Rn & 0x7);
11680 inst.instruction |= Rm << 3;
/* Non-unified (classic Thumb) syntax path.  */
11687 inst.instruction = THUMB_OP16 (inst.instruction);
11689 /* PR 10443: Do not silently ignore shifted operands. */
11690 constraint (inst.operands[1].shifted,
11691 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11693 if (inst.operands[1].isreg)
11695 if (Rn < 8 && Rm < 8)
11697 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11698 since a MOV instruction produces unpredictable results. */
11699 if (inst.instruction == T_OPCODE_MOV_I8)
11700 inst.instruction = T_OPCODE_ADD_I3;
11702 inst.instruction = T_OPCODE_CMP_LR;
11704 inst.instruction |= Rn;
11705 inst.instruction |= Rm << 3;
11709 if (inst.instruction == T_OPCODE_MOV_I8)
11710 inst.instruction = T_OPCODE_MOV_HR;
11712 inst.instruction = T_OPCODE_CMP_HR;
11718 constraint (Rn > 7,
11719 _("only lo regs allowed with immediate"));
11720 inst.instruction |= Rn << 8;
11721 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
/* NOTE(review): body of the MOVW/MOVT encoder (presumably do_t_mov16);
   function header missing from this excerpt.  Converts ARM :lower16:/
   :upper16: relocs to their Thumb counterparts and, for a resolved
   constant, scatters the 16-bit immediate into the T32 fields.  */
11732 top = (inst.instruction & 0x00800000) != 0;
11733 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11735 constraint (top, _(":lower16: not allowed this instruction"));
11736 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11738 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11740 constraint (!top, _(":upper16: not allowed this instruction"));
11741 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11744 Rd = inst.operands[0].reg;
11745 reject_bad_reg (Rd);
11747 inst.instruction |= Rd << 8;
11748 if (inst.reloc.type == BFD_RELOC_UNUSED)
11750 imm = inst.reloc.exp.X_add_number;
/* imm16 is split across imm4:i:imm3:imm8 in the T32 encoding.  */
11751 inst.instruction |= (imm & 0xf000) << 4;
11752 inst.instruction |= (imm & 0x0800) << 15;
11753 inst.instruction |= (imm & 0x0700) << 4;
11754 inst.instruction |= (imm & 0x00ff);
/* Encode Thumb MVN/MVNS/TST/TEQ/CMP/CMN (one source register form).
   NOTE(review): intervening lines are missing from this excerpt;
   code kept byte-identical.  */
11759 do_t_mvn_tst (void)
11763 Rn = inst.operands[0].reg;
11764 Rm = inst.operands[1].reg;
11766 if (inst.instruction == T_MNEM_cmp
11767 || inst.instruction == T_MNEM_cmn)
11768 constraint (Rn == REG_PC, BAD_PC);
11770 reject_bad_reg (Rn);
11771 reject_bad_reg (Rm);
11773 if (unified_syntax)
/* Destination field position differs between mvn-type and tst-type.  */
11775 int r0off = (inst.instruction == T_MNEM_mvn
11776 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11777 bfd_boolean narrow;
11779 if (inst.size_req == 4
11780 || inst.instruction > 0xffff
11781 || inst.operands[1].shifted
11782 || Rn > 7 || Rm > 7)
11784 else if (inst.instruction == T_MNEM_cmn
11785 || inst.instruction == T_MNEM_tst)
11787 else if (THUMB_SETS_FLAGS (inst.instruction))
11788 narrow = !in_it_block ();
11790 narrow = in_it_block ();
11792 if (!inst.operands[1].isreg)
11794 /* For an immediate, we always generate a 32-bit opcode;
11795 section relaxation will shrink it later if possible. */
11796 if (inst.instruction < 0xffff)
11797 inst.instruction = THUMB_OP32 (inst.instruction);
11798 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11799 inst.instruction |= Rn << r0off;
11800 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11804 /* See if we can do this with a 16-bit instruction. */
11807 inst.instruction = THUMB_OP16 (inst.instruction);
11808 inst.instruction |= Rn;
11809 inst.instruction |= Rm << 3;
11813 constraint (inst.operands[1].shifted
11814 && inst.operands[1].immisreg,
11815 _("shift must be constant"));
11816 if (inst.instruction < 0xffff)
11817 inst.instruction = THUMB_OP32 (inst.instruction);
11818 inst.instruction |= Rn << r0off;
11819 encode_thumb32_shifted_operand (1);
/* Non-unified syntax: 16-bit low-register form only.  */
11825 constraint (inst.instruction > 0xffff
11826 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
11827 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
11828 _("unshifted register required"));
11829 constraint (Rn > 7 || Rm > 7,
11832 inst.instruction = THUMB_OP16 (inst.instruction);
11833 inst.instruction |= Rn;
11834 inst.instruction |= Rm << 3;
/* NOTE(review): fragments of the MRS encoder and (from the
   do_vfp_nsyn_msr call on) the MSR encoder — headers missing from this
   excerpt; code kept byte-identical.  Both defer to the VFP
   new-syntax handler first.  */
11843 if (do_vfp_nsyn_mrs () == SUCCESS)
11846 Rd = inst.operands[0].reg;
11847 reject_bad_reg (Rd);
11848 inst.instruction |= Rd << 8;
11850 if (inst.operands[1].isreg)
11852 unsigned br = inst.operands[1].reg;
11853 if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
11854 as_bad (_("bad register for mrs"));
11856 inst.instruction |= br & (0xf << 16);
11857 inst.instruction |= (br & 0x300) >> 4;
11858 inst.instruction |= (br & SPSR_BIT) >> 2;
11862 int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11864 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11866 /* PR gas/12698: The constraint is only applied for m_profile.
11867 If the user has specified -march=all, we want to ignore it as
11868 we are building for any CPU type, including non-m variants. */
11869 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11870 constraint ((flags != 0) && m_profile, _("selected processor does "
11871 "not support requested special purpose register"));
11874 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11876 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
11877 _("'APSR', 'CPSR' or 'SPSR' expected"));
11879 inst.instruction |= (flags & SPSR_BIT) >> 2;
11880 inst.instruction |= inst.operands[1].imm & 0xff;
11881 inst.instruction |= 0xf0000;
/* MSR encoder begins here (header not visible in this excerpt).  */
11891 if (do_vfp_nsyn_msr () == SUCCESS)
11894 constraint (!inst.operands[1].isreg,
11895 _("Thumb encoding does not support an immediate here"));
11897 if (inst.operands[0].isreg)
11898 flags = (int)(inst.operands[0].reg);
11900 flags = inst.operands[0].imm;
11902 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11904 int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11906 /* PR gas/12698: The constraint is only applied for m_profile.
11907 If the user has specified -march=all, we want to ignore it as
11908 we are building for any CPU type, including non-m variants. */
11909 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11910 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11911 && (bits & ~(PSR_s | PSR_f)) != 0)
11912 || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11913 && bits != PSR_f)) && m_profile,
11914 _("selected processor does not support requested special "
11915 "purpose register"));
11918 constraint ((flags & 0xff) != 0, _("selected processor does not support "
11919 "requested special purpose register"));
11921 Rn = inst.operands[1].reg;
11922 reject_bad_reg (Rn);
11924 inst.instruction |= (flags & SPSR_BIT) >> 2;
11925 inst.instruction |= (flags & 0xf0000) >> 8;
11926 inst.instruction |= (flags & 0x300) >> 4;
11927 inst.instruction |= (flags & 0xff);
11928 inst.instruction |= Rn << 16;
/* NOTE(review): bodies of the MUL/MULS encoder and the long-multiply
   (RdLo/RdHi) encoder — headers missing from this excerpt; code kept
   byte-identical.  */
11934 bfd_boolean narrow;
11935 unsigned Rd, Rn, Rm;
/* Two-operand form: Rm defaults to Rd.  */
11937 if (!inst.operands[2].present)
11938 inst.operands[2].reg = inst.operands[0].reg;
11940 Rd = inst.operands[0].reg;
11941 Rn = inst.operands[1].reg;
11942 Rm = inst.operands[2].reg;
11944 if (unified_syntax)
11946 if (inst.size_req == 4
11952 else if (inst.instruction == T_MNEM_muls)
11953 narrow = !in_it_block ();
11955 narrow = in_it_block ();
11959 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
11960 constraint (Rn > 7 || Rm > 7,
11967 /* 16-bit MULS/Conditional MUL. */
11968 inst.instruction = THUMB_OP16 (inst.instruction);
11969 inst.instruction |= Rd;
/* 16-bit MUL requires Rd to equal one of the sources.  */
11972 inst.instruction |= Rm << 3;
11974 inst.instruction |= Rn << 3;
11976 constraint (1, _("dest must overlap one source register"));
11980 constraint (inst.instruction != T_MNEM_mul,
11981 _("Thumb-2 MUL must not set flags"));
11983 inst.instruction = THUMB_OP32 (inst.instruction);
11984 inst.instruction |= Rd << 8;
11985 inst.instruction |= Rn << 16;
11986 inst.instruction |= Rm << 0;
11988 reject_bad_reg (Rd);
11989 reject_bad_reg (Rn);
11990 reject_bad_reg (Rm);
/* Long multiply (presumably do_t_mull) begins here.  */
11997 unsigned RdLo, RdHi, Rn, Rm;
11999 RdLo = inst.operands[0].reg;
12000 RdHi = inst.operands[1].reg;
12001 Rn = inst.operands[2].reg;
12002 Rm = inst.operands[3].reg;
12004 reject_bad_reg (RdLo);
12005 reject_bad_reg (RdHi);
12006 reject_bad_reg (Rn);
12007 reject_bad_reg (Rm);
12009 inst.instruction |= RdLo << 12;
12010 inst.instruction |= RdHi << 8;
12011 inst.instruction |= Rn << 16;
12012 inst.instruction |= Rm;
12015 as_tsktsk (_("rdhi and rdlo must be different"));
/* NOTE(review): body of the NOP/hint encoder (presumably do_t_nop);
   header missing from this excerpt.  Falls back to the v6t2 16-bit
   hint or plain 0x46c0 (mov r8, r8) when Thumb-2 is unavailable.  */
12021 set_it_insn_type (NEUTRAL_IT_INSN);
12023 if (unified_syntax)
12025 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12027 inst.instruction = THUMB_OP32 (inst.instruction);
12028 inst.instruction |= inst.operands[0].imm;
12032 /* PR9722: Check for Thumb2 availability before
12033 generating a thumb2 nop instruction. */
12034 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12036 inst.instruction = THUMB_OP16 (inst.instruction);
12037 inst.instruction |= inst.operands[0].imm << 4;
12040 inst.instruction = 0x46c0;
12045 constraint (inst.operands[0].present,
12046 _("Thumb does not support NOP with hints"));
12047 inst.instruction = 0x46c0;
/* NOTE(review): body of a two-register encoder choosing between
   narrow (16-bit) and wide (32-bit) forms based on IT state, register
   range and size_req — header missing from this excerpt.  */
12054 if (unified_syntax)
12056 bfd_boolean narrow;
12058 if (THUMB_SETS_FLAGS (inst.instruction))
12059 narrow = !in_it_block ();
12061 narrow = in_it_block ();
12062 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12064 if (inst.size_req == 4)
12069 inst.instruction = THUMB_OP32 (inst.instruction);
12070 inst.instruction |= inst.operands[0].reg << 8;
12071 inst.instruction |= inst.operands[1].reg << 16;
12075 inst.instruction = THUMB_OP16 (inst.instruction);
12076 inst.instruction |= inst.operands[0].reg;
12077 inst.instruction |= inst.operands[1].reg << 3;
/* Non-unified syntax: low registers and 16-bit encoding only.  */
12082 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12084 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12086 inst.instruction = THUMB_OP16 (inst.instruction);
12087 inst.instruction |= inst.operands[0].reg;
12088 inst.instruction |= inst.operands[1].reg << 3;
/* NOTE(review): body of the ORN encoder (header missing from this
   excerpt).  Handles both immediate and shifted-register operand 2;
   code kept byte-identical.  */
12097 Rd = inst.operands[0].reg;
/* Two-operand form: Rn defaults to Rd.  */
12098 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12100 reject_bad_reg (Rd);
12101 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12102 reject_bad_reg (Rn);
12104 inst.instruction |= Rd << 8;
12105 inst.instruction |= Rn << 16;
12107 if (!inst.operands[2].isreg)
12109 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12110 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12116 Rm = inst.operands[2].reg;
12117 reject_bad_reg (Rm);
12119 constraint (inst.operands[2].shifted
12120 && inst.operands[2].immisreg,
12121 _("shift must be constant"));
12122 encode_thumb32_shifted_operand (2);
/* NOTE(review): fragments of the PKHBT encoder, the PKHTB wrapper
   (which swaps Rn/Rm and clears the tb bit per PR 10168), and the
   PLD-style address encoder — headers missing from this excerpt.  */
12129 unsigned Rd, Rn, Rm;
12131 Rd = inst.operands[0].reg;
12132 Rn = inst.operands[1].reg;
12133 Rm = inst.operands[2].reg;
12135 reject_bad_reg (Rd);
12136 reject_bad_reg (Rn);
12137 reject_bad_reg (Rm);
12139 inst.instruction |= Rd << 8;
12140 inst.instruction |= Rn << 16;
12141 inst.instruction |= Rm;
12142 if (inst.operands[3].present)
12144 unsigned int val = inst.reloc.exp.X_add_number;
12145 constraint (inst.reloc.exp.X_op != O_constant,
12146 _("expression too complex"));
/* Shift amount is split across imm3:imm2 in the T32 encoding.  */
12147 inst.instruction |= (val & 0x1c) << 10;
12148 inst.instruction |= (val & 0x03) << 6;
12155 if (!inst.operands[3].present)
12159 inst.instruction &= ~0x00000020;
12161 /* PR 10168. Swap the Rm and Rn registers. */
12162 Rtmp = inst.operands[1].reg;
12163 inst.operands[1].reg = inst.operands[2].reg;
12164 inst.operands[2].reg = Rtmp;
12172 if (inst.operands[0].immisreg)
12173 reject_bad_reg (inst.operands[0].imm);
12175 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
/* Encode Thumb PUSH/POP.  Prefers the 16-bit encoding (low regs plus
   optionally LR for push / PC for pop), falling back to the 32-bit
   ldm/stm form under unified syntax.  NOTE(review): some intervening
   lines are missing from this excerpt; code kept byte-identical.  */
12179 do_t_push_pop (void)
12183 constraint (inst.operands[0].writeback,
12184 _("push/pop do not support {reglist}^"));
12185 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
12186 _("expression too complex"));
12188 mask = inst.operands[0].imm;
12189 if (inst.size_req != 4 && (mask & ~0xff) == 0)
12190 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
12191 else if (inst.size_req != 4
12192 && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
12193 ? REG_LR : REG_PC)))
12195 inst.instruction = THUMB_OP16 (inst.instruction);
12196 inst.instruction |= THUMB_PP_PC_LR;
12197 inst.instruction |= mask & 0xff;
12199 else if (unified_syntax)
12201 inst.instruction = THUMB_OP32 (inst.instruction);
/* Push/pop are ldm/stm with SP (r13) as base and writeback.  */
12202 encode_thumb2_ldmstm (13, mask, TRUE);
12206 inst.error = _("invalid register list to push/pop instruction");
/* NOTE(review): bodies of three small two-register encoders —
   apparently rbit (Rm encoded twice), rev (narrow/wide selection),
   and rrx — headers missing from this excerpt.  */
12216 Rd = inst.operands[0].reg;
12217 Rm = inst.operands[1].reg;
12219 reject_bad_reg (Rd);
12220 reject_bad_reg (Rm);
12222 inst.instruction |= Rd << 8;
12223 inst.instruction |= Rm << 16;
/* Rm appears in both the Rn and Rm fields of this encoding.  */
12224 inst.instruction |= Rm;
12232 Rd = inst.operands[0].reg;
12233 Rm = inst.operands[1].reg;
12235 reject_bad_reg (Rd);
12236 reject_bad_reg (Rm);
12238 if (Rd <= 7 && Rm <= 7
12239 && inst.size_req != 4)
12241 inst.instruction = THUMB_OP16 (inst.instruction);
12242 inst.instruction |= Rd;
12243 inst.instruction |= Rm << 3;
12245 else if (unified_syntax)
12247 inst.instruction = THUMB_OP32 (inst.instruction);
12248 inst.instruction |= Rd << 8;
12249 inst.instruction |= Rm << 16;
12250 inst.instruction |= Rm;
12253 inst.error = BAD_HIREG;
12261 Rd = inst.operands[0].reg;
12262 Rm = inst.operands[1].reg;
12264 reject_bad_reg (Rd);
12265 reject_bad_reg (Rm);
12267 inst.instruction |= Rd << 8;
12268 inst.instruction |= Rm;
/* NOTE(review): body of the RSB encoder — header missing from this
   excerpt.  Turns "rsb Rd, Rs, #0" into the 16-bit NEGS form when
   possible; code kept byte-identical.  */
12276 Rd = inst.operands[0].reg;
/* Two-operand form: Rs defaults to Rd.  */
12277 Rs = (inst.operands[1].present
12278 ? inst.operands[1].reg /* Rd, Rs, foo */
12279 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
12281 reject_bad_reg (Rd);
12282 reject_bad_reg (Rs);
12283 if (inst.operands[2].isreg)
12284 reject_bad_reg (inst.operands[2].reg);
12286 inst.instruction |= Rd << 8;
12287 inst.instruction |= Rs << 16;
12288 if (!inst.operands[2].isreg)
12290 bfd_boolean narrow;
12292 if ((inst.instruction & 0x00100000) != 0)
12293 narrow = !in_it_block ();
12295 narrow = in_it_block ();
12297 if (Rd > 7 || Rs > 7)
12300 if (inst.size_req == 4 || !unified_syntax)
12303 if (inst.reloc.exp.X_op != O_constant
12304 || inst.reloc.exp.X_add_number != 0)
12307 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12308 relaxation, but it doesn't seem worth the hassle. */
12311 inst.reloc.type = BFD_RELOC_UNUSED;
12312 inst.instruction = THUMB_OP16 (T_MNEM_negs);
12313 inst.instruction |= Rs << 3;
12314 inst.instruction |= Rd;
12318 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12319 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12323 encode_thumb32_shifted_operand (2);
/* NOTE(review): body of the SETEND encoder — header missing from this
   excerpt.  Deprecated on ARMv8; bit 3 selects big-endian.  */
12329 if (warn_on_deprecated
12330 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12331 as_warn (_("setend use is deprecated for ARMv8"));
12333 set_it_insn_type (OUTSIDE_IT_INSN);
12334 if (inst.operands[0].imm)
12335 inst.instruction |= 0x8;
/* NOTE(review): body of the shift-instruction encoder (presumably
   do_t_shift): lsl/lsr/asr/ror with register or immediate shift
   amount, narrow/wide selection under unified syntax, and the classic
   16-bit-only path otherwise.  Intervening lines are missing from
   this excerpt; code kept byte-identical.  */
12341 if (!inst.operands[1].present)
12342 inst.operands[1].reg = inst.operands[0].reg;
12344 if (unified_syntax)
12346 bfd_boolean narrow;
12349 switch (inst.instruction)
12352 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
12354 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
12356 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
12358 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
12362 if (THUMB_SETS_FLAGS (inst.instruction))
12363 narrow = !in_it_block ();
12365 narrow = in_it_block ();
12366 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
/* ROR by immediate has no 16-bit encoding.  */
12368 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
12370 if (inst.operands[2].isreg
12371 && (inst.operands[1].reg != inst.operands[0].reg
12372 || inst.operands[2].reg > 7))
12374 if (inst.size_req == 4)
12377 reject_bad_reg (inst.operands[0].reg);
12378 reject_bad_reg (inst.operands[1].reg);
12382 if (inst.operands[2].isreg)
12384 reject_bad_reg (inst.operands[2].reg);
12385 inst.instruction = THUMB_OP32 (inst.instruction);
12386 inst.instruction |= inst.operands[0].reg << 8;
12387 inst.instruction |= inst.operands[1].reg << 16;
12388 inst.instruction |= inst.operands[2].reg;
12390 /* PR 12854: Error on extraneous shifts. */
12391 constraint (inst.operands[2].shifted,
12392 _("extraneous shift as part of operand to shift insn"));
/* Immediate shifts are emitted as 32-bit MOV/MOVS with shifter.  */
12396 inst.operands[1].shifted = 1;
12397 inst.operands[1].shift_kind = shift_kind;
12398 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
12399 ? T_MNEM_movs : T_MNEM_mov);
12400 inst.instruction |= inst.operands[0].reg << 8;
12401 encode_thumb32_shifted_operand (1);
12402 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12403 inst.reloc.type = BFD_RELOC_UNUSED;
12408 if (inst.operands[2].isreg)
12410 switch (shift_kind)
12412 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
12413 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
12414 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
12415 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
12419 inst.instruction |= inst.operands[0].reg;
12420 inst.instruction |= inst.operands[2].reg << 3;
12422 /* PR 12854: Error on extraneous shifts. */
12423 constraint (inst.operands[2].shifted,
12424 _("extraneous shift as part of operand to shift insn"));
12428 switch (shift_kind)
12430 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12431 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12432 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12435 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12436 inst.instruction |= inst.operands[0].reg;
12437 inst.instruction |= inst.operands[1].reg << 3;
/* Non-unified syntax: 16-bit low-register encodings only.  */
12443 constraint (inst.operands[0].reg > 7
12444 || inst.operands[1].reg > 7, BAD_HIREG);
12445 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12447 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
12449 constraint (inst.operands[2].reg > 7, BAD_HIREG);
12450 constraint (inst.operands[0].reg != inst.operands[1].reg,
12451 _("source1 and dest must be same register"));
12453 switch (inst.instruction)
12455 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
12456 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
12457 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
12458 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
12462 inst.instruction |= inst.operands[0].reg;
12463 inst.instruction |= inst.operands[2].reg << 3;
12465 /* PR 12854: Error on extraneous shifts. */
12466 constraint (inst.operands[2].shifted,
12467 _("extraneous shift as part of operand to shift insn"));
12471 switch (inst.instruction)
12473 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
12474 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
12475 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
12476 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
12479 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12480 inst.instruction |= inst.operands[0].reg;
12481 inst.instruction |= inst.operands[1].reg << 3;
/* NOTE(review): bodies of two three-register SIMD encoders — identical
   except the second reads Rm from operand 1 and Rn from operand 2
   (swapped operand order); headers missing from this excerpt.  */
12489 unsigned Rd, Rn, Rm;
12491 Rd = inst.operands[0].reg;
12492 Rn = inst.operands[1].reg;
12493 Rm = inst.operands[2].reg;
12495 reject_bad_reg (Rd);
12496 reject_bad_reg (Rn);
12497 reject_bad_reg (Rm);
12499 inst.instruction |= Rd << 8;
12500 inst.instruction |= Rn << 16;
12501 inst.instruction |= Rm;
12507 unsigned Rd, Rn, Rm;
12509 Rd = inst.operands[0].reg;
/* Note the swapped operand order relative to the encoder above.  */
12510 Rm = inst.operands[1].reg;
12511 Rn = inst.operands[2].reg;
12513 reject_bad_reg (Rd);
12514 reject_bad_reg (Rn);
12515 reject_bad_reg (Rm);
12517 inst.instruction |= Rd << 8;
12518 inst.instruction |= Rn << 16;
12519 inst.instruction |= Rm;
/* NOTE(review): bodies of the SMC and (from the second "value"
   declaration on) HVC encoders — headers missing from this excerpt.  */
12525 unsigned int value = inst.reloc.exp.X_add_number;
12526 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12527 _("SMC is not permitted on this architecture"));
12528 constraint (inst.reloc.exp.X_op != O_constant,
12529 _("expression too complex"));
12530 inst.reloc.type = BFD_RELOC_UNUSED;
12531 inst.instruction |= (value & 0xf000) >> 12;
12532 inst.instruction |= (value & 0x0ff0);
12533 inst.instruction |= (value & 0x000f) << 16;
12534 /* PR gas/15623: SMC instructions must be last in an IT block. */
12535 set_it_insn_type_last ();
12541 unsigned int value = inst.reloc.exp.X_add_number;
12543 inst.reloc.type = BFD_RELOC_UNUSED;
12544 inst.instruction |= (value & 0x0fff);
12545 inst.instruction |= (value & 0xf000) << 4;
/* Common encoder for SSAT/USAT.  BIAS adjusts the saturate-width
   immediate (SSAT counts from 1, USAT from 0).  NOTE(review): some
   intervening lines are missing from this excerpt; code kept
   byte-identical.  The do_t_ssat wrapper and the ssat16-style body
   follow below.  */
12549 do_t_ssat_usat (int bias)
12553 Rd = inst.operands[0].reg;
12554 Rn = inst.operands[2].reg;
12556 reject_bad_reg (Rd);
12557 reject_bad_reg (Rn);
12559 inst.instruction |= Rd << 8;
12560 inst.instruction |= inst.operands[1].imm - bias;
12561 inst.instruction |= Rn << 16;
12563 if (inst.operands[3].present)
12565 offsetT shift_amount = inst.reloc.exp.X_add_number;
12567 inst.reloc.type = BFD_RELOC_UNUSED;
12569 constraint (inst.reloc.exp.X_op != O_constant,
12570 _("expression too complex"));
12572 if (shift_amount != 0)
12574 constraint (shift_amount > 31,
12575 _("shift expression is too large"));
12577 if (inst.operands[3].shift_kind == SHIFT_ASR)
12578 inst.instruction |= 0x00200000; /* sh bit. */
/* Shift amount is split across imm3:imm2 in the T32 encoding.  */
12580 inst.instruction |= (shift_amount & 0x1c) << 10;
12581 inst.instruction |= (shift_amount & 0x03) << 6;
/* SSAT wrapper: saturate width is biased by 1.  */
12589 do_t_ssat_usat (1);
12597 Rd = inst.operands[0].reg;
12598 Rn = inst.operands[2].reg;
12600 reject_bad_reg (Rd);
12601 reject_bad_reg (Rn);
12603 inst.instruction |= Rd << 8;
12604 inst.instruction |= inst.operands[1].imm - 1;
12605 inst.instruction |= Rn << 16;
/* NOTE(review): bodies of the STREX and STREXD encoders — headers
   missing from this excerpt; code kept byte-identical.  */
12611 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12612 || inst.operands[2].postind || inst.operands[2].writeback
12613 || inst.operands[2].immisreg || inst.operands[2].shifted
12614 || inst.operands[2].negative,
12617 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12619 inst.instruction |= inst.operands[0].reg << 8;
12620 inst.instruction |= inst.operands[1].reg << 12;
12621 inst.instruction |= inst.operands[2].reg << 16;
12622 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
/* STREXD: second source register defaults to Rt + 1; the status
   register must not overlap any other operand.  */
12628 if (!inst.operands[2].present)
12629 inst.operands[2].reg = inst.operands[1].reg + 1;
12631 constraint (inst.operands[0].reg == inst.operands[1].reg
12632 || inst.operands[0].reg == inst.operands[2].reg
12633 || inst.operands[0].reg == inst.operands[3].reg,
12636 inst.instruction |= inst.operands[0].reg;
12637 inst.instruction |= inst.operands[1].reg << 12;
12638 inst.instruction |= inst.operands[2].reg << 8;
12639 inst.instruction |= inst.operands[3].reg << 16;
/* NOTE(review): bodies of the SXTAH-style encoder (three registers
   plus a rotation in operand 3) and the SXTH-style encoder (two
   registers, optional rotation, with a 16-bit form when the rotation
   is zero) — headers missing from this excerpt.  */
12645 unsigned Rd, Rn, Rm;
12647 Rd = inst.operands[0].reg;
12648 Rn = inst.operands[1].reg;
12649 Rm = inst.operands[2].reg;
12651 reject_bad_reg (Rd);
12652 reject_bad_reg (Rn);
12653 reject_bad_reg (Rm);
12655 inst.instruction |= Rd << 8;
12656 inst.instruction |= Rn << 16;
12657 inst.instruction |= Rm;
12658 inst.instruction |= inst.operands[3].imm << 4;
12666 Rd = inst.operands[0].reg;
12667 Rm = inst.operands[1].reg;
12669 reject_bad_reg (Rd);
12670 reject_bad_reg (Rm);
/* 16-bit form is only available for low regs with no rotation.  */
12672 if (inst.instruction <= 0xffff
12673 && inst.size_req != 4
12674 && Rd <= 7 && Rm <= 7
12675 && (!inst.operands[2].present || inst.operands[2].imm == 0))
12677 inst.instruction = THUMB_OP16 (inst.instruction);
12678 inst.instruction |= Rd;
12679 inst.instruction |= Rm << 3;
12681 else if (unified_syntax)
12683 if (inst.instruction <= 0xffff)
12684 inst.instruction = THUMB_OP32 (inst.instruction);
12685 inst.instruction |= Rd << 8;
12686 inst.instruction |= Rm;
12687 inst.instruction |= inst.operands[2].imm << 4;
12691 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
12692 _("Thumb encoding does not support rotation"));
12693 constraint (1, BAD_HIREG);
12700 /* We have to do the following check manually as ARM_EXT_OS only applies
12702 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
12704 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
12705 /* This only applies to the v6m howver, not later architectures. */
12706 && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
12707 as_bad (_("SVC is not permitted on this architecture"));
12708 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
12711 inst.reloc.type = BFD_RELOC_ARM_SWI;
12720 half = (inst.instruction & 0x10) != 0;
12721 set_it_insn_type_last ();
12722 constraint (inst.operands[0].immisreg,
12723 _("instruction requires register index"));
12725 Rn = inst.operands[0].reg;
12726 Rm = inst.operands[0].imm;
12728 constraint (Rn == REG_SP, BAD_SP);
12729 reject_bad_reg (Rm);
12731 constraint (!half && inst.operands[0].shifted,
12732 _("instruction does not allow shifted index"));
12733 inst.instruction |= (Rn << 16) | Rm;
12739 if (!inst.operands[0].present)
12740 inst.operands[0].imm = 0;
12742 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
12744 constraint (inst.size_req == 2,
12745 _("immediate value out of range"));
12746 inst.instruction = THUMB_OP32 (inst.instruction);
12747 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
12748 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
12752 inst.instruction = THUMB_OP16 (inst.instruction);
12753 inst.instruction |= inst.operands[0].imm;
12756 set_it_insn_type (NEUTRAL_IT_INSN);
12763 do_t_ssat_usat (0);
12771 Rd = inst.operands[0].reg;
12772 Rn = inst.operands[2].reg;
12774 reject_bad_reg (Rd);
12775 reject_bad_reg (Rn);
12777 inst.instruction |= Rd << 8;
12778 inst.instruction |= inst.operands[1].imm;
12779 inst.instruction |= Rn << 16;
12782 /* Neon instruction encoder helpers. */
12784 /* Encodings for the different types for various Neon opcodes. */
12786 /* An "invalid" code for the following tables. */
12789 struct neon_tab_entry
12792 unsigned float_or_poly;
12793 unsigned scalar_or_imm;
12796 /* Map overloaded Neon opcodes to their respective encodings. */
/* Each X() row gives the three encoding columns of struct neon_tab_entry
   for one overloaded mnemonic: the base opcode bits for the integer /
   register form, for the float-or-polynomial form, and for the
   scalar-or-immediate form (see the accessor macros below, which select
   a column by field name).  N_INV marks a form that does not exist for
   that mnemonic.  The values are raw instruction bit patterns -- do not
   reformat or "simplify" them.  */
12797 #define NEON_ENC_TAB \
12798 X(vabd, 0x0000700, 0x1200d00, N_INV), \
12799 X(vmax, 0x0000600, 0x0000f00, N_INV), \
12800 X(vmin, 0x0000610, 0x0200f00, N_INV), \
12801 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
12802 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
12803 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
12804 X(vadd, 0x0000800, 0x0000d00, N_INV), \
12805 X(vsub, 0x1000800, 0x0200d00, N_INV), \
12806 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
12807 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
12808 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
12809 /* Register variants of the following two instructions are encoded as
12810 vcge / vcgt with the operands reversed. */ \
12811 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
12812 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
12813 X(vfma, N_INV, 0x0000c10, N_INV), \
12814 X(vfms, N_INV, 0x0200c10, N_INV), \
12815 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
12816 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
12817 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
12818 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
12819 X(vmlal, 0x0800800, N_INV, 0x0800240), \
12820 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
12821 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
12822 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
12823 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
12824 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
12825 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
12826 X(vshl, 0x0000400, N_INV, 0x0800510), \
12827 X(vqshl, 0x0000410, N_INV, 0x0800710), \
12828 X(vand, 0x0000110, N_INV, 0x0800030), \
12829 X(vbic, 0x0100110, N_INV, 0x0800030), \
12830 X(veor, 0x1000110, N_INV, N_INV), \
12831 X(vorn, 0x0300110, N_INV, 0x0800010), \
12832 X(vorr, 0x0200110, N_INV, 0x0800010), \
12833 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
12834 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
12835 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
12836 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
12837 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
12838 X(vst1, 0x0000000, 0x0800000, N_INV), \
12839 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
12840 X(vst2, 0x0000100, 0x0800100, N_INV), \
12841 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
12842 X(vst3, 0x0000200, 0x0800200, N_INV), \
12843 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
12844 X(vst4, 0x0000300, 0x0800300, N_INV), \
12845 X(vmovn, 0x1b20200, N_INV, N_INV), \
12846 X(vtrn, 0x1b20080, N_INV, N_INV), \
12847 X(vqmovn, 0x1b20200, N_INV, N_INV), \
12848 X(vqmovun, 0x1b20240, N_INV, N_INV), \
12849 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
12850 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
12851 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
12852 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
12853 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
12854 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
12855 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
12856 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
12857 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
12858 X(vseleq, 0xe000a00, N_INV, N_INV), \
12859 X(vselvs, 0xe100a00, N_INV, N_INV), \
12860 X(vselge, 0xe200a00, N_INV, N_INV), \
12861 X(vselgt, 0xe300a00, N_INV, N_INV), \
12862 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
12863 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
12864 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
12865 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
12866 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
12867 X(aes, 0x3b00300, N_INV, N_INV), \
12868 X(sha3op, 0x2000c00, N_INV, N_INV), \
12869 X(sha1h, 0x3b902c0, N_INV, N_INV), \
12870 X(sha2op, 0x3ba0380, N_INV, N_INV)
12874 #define X(OPC,I,F,S) N_MNEM_##OPC
12879 static const struct neon_tab_entry neon_enc_tab[] =
12881 #define X(OPC,I,F,S) { (I), (F), (S) }
/* Column accessors for neon_enc_tab.  The argument X is the current
   inst.instruction value, whose low 28 bits hold an N_MNEM_* table index
   (hence the "& 0x0fffffff" before indexing); several names alias the
   same column so encoder code can use the name matching the form being
   emitted.  NEON_ENC_SINGLE_ / NEON_ENC_DOUBLE_ additionally preserve
   the top nibble (bits 28-31) of X in the result.
   NOTE(review): NEON_ENC_FPV8_ masks with 0xf000000 (bits 24-27) rather
   than 0xf0000000 -- presumably intentional for the FP v8 encodings;
   confirm before changing.  */
12886 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
12887 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12888 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12889 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12890 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12891 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12892 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12893 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12894 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12895 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12896 #define NEON_ENC_SINGLE_(X) \
12897 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12898 #define NEON_ENC_DOUBLE_(X) \
12899 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12900 #define NEON_ENC_FPV8_(X) \
12901 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
12903 #define NEON_ENCODE(type, inst) \
12906 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
12907 inst.is_neon = 1; \
12911 #define check_neon_suffixes \
12914 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
12916 as_bad (_("invalid neon suffix for non neon instruction")); \
12922 /* Define shapes for instruction operands. The following mnemonic characters
12923 are used in this table:
12925 F - VFP S<n> register
12926 D - Neon D<n> register
12927 Q - Neon Q<n> register
12931 L - D<n> register list
12933 This table is used to generate various data:
12934 - enumerations of the form NS_DDR to be used as arguments to
12936 - a table classifying shapes into single, double, quad, mixed.
12937 - a table used to drive neon_select_shape. */
/* Each X(N, (elts), CLASS) row describes one legal operand shape:
   N is the operand count, (elts) the per-operand element kinds (letters
   documented in the comment above), and CLASS the size classification.
   The same rows are expanded three times with different X() definitions
   below: once into NS_* enumerators, once into the SC_* classification
   array, and once into neon_shape_tab used by neon_select_shape.
   Keep the three expansions in sync by editing only this table.  */
12939 #define NEON_SHAPE_DEF \
12940 X(3, (D, D, D), DOUBLE), \
12941 X(3, (Q, Q, Q), QUAD), \
12942 X(3, (D, D, I), DOUBLE), \
12943 X(3, (Q, Q, I), QUAD), \
12944 X(3, (D, D, S), DOUBLE), \
12945 X(3, (Q, Q, S), QUAD), \
12946 X(2, (D, D), DOUBLE), \
12947 X(2, (Q, Q), QUAD), \
12948 X(2, (D, S), DOUBLE), \
12949 X(2, (Q, S), QUAD), \
12950 X(2, (D, R), DOUBLE), \
12951 X(2, (Q, R), QUAD), \
12952 X(2, (D, I), DOUBLE), \
12953 X(2, (Q, I), QUAD), \
12954 X(3, (D, L, D), DOUBLE), \
12955 X(2, (D, Q), MIXED), \
12956 X(2, (Q, D), MIXED), \
12957 X(3, (D, Q, I), MIXED), \
12958 X(3, (Q, D, I), MIXED), \
12959 X(3, (Q, D, D), MIXED), \
12960 X(3, (D, Q, Q), MIXED), \
12961 X(3, (Q, Q, D), MIXED), \
12962 X(3, (Q, D, S), MIXED), \
12963 X(3, (D, Q, S), MIXED), \
12964 X(4, (D, D, D, I), DOUBLE), \
12965 X(4, (Q, Q, Q, I), QUAD), \
12966 X(2, (F, F), SINGLE), \
12967 X(3, (F, F, F), SINGLE), \
12968 X(2, (F, I), SINGLE), \
12969 X(2, (F, D), MIXED), \
12970 X(2, (D, F), MIXED), \
12971 X(3, (F, F, I), MIXED), \
12972 X(4, (R, R, F, F), SINGLE), \
12973 X(4, (F, F, R, R), SINGLE), \
12974 X(3, (D, R, R), DOUBLE), \
12975 X(3, (R, R, D), DOUBLE), \
12976 X(2, (S, R), SINGLE), \
12977 X(2, (R, S), SINGLE), \
12978 X(2, (F, R), SINGLE), \
12979 X(2, (R, F), SINGLE)
/* Token-paste the shape letters of a NEON_SHAPE_DEF row into an NS_*
   enumerator name, e.g. S3(D, D, D) -> NS_DDD.  Used via the X()
   expansion of NEON_SHAPE_DEF; redefined further down for other
   expansions of the same table.  */
12981 #define S2(A,B) NS_##A##B
12982 #define S3(A,B,C) NS_##A##B##C
12983 #define S4(A,B,C,D) NS_##A##B##C##D
12985 #define X(N, L, C) S##N L
12998 enum neon_shape_class
13006 #define X(N, L, C) SC_##C
13008 static enum neon_shape_class neon_shape_class[] =
13026 /* Register widths of above. */
13027 static unsigned neon_shape_el_size[] =
13038 struct neon_shape_info
13041 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
/* Redefine S2/S3/S4 so the same NEON_SHAPE_DEF rows now expand to
   brace-enclosed lists of SE_* element kinds, forming the el[] array
   initializers of struct neon_shape_info in neon_shape_tab below.  */
13044 #define S2(A,B) { SE_##A, SE_##B }
13045 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13046 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13048 #define X(N, L, C) { N, S##N L }
13050 static struct neon_shape_info neon_shape_tab[] =
13060 /* Bit masks used in type checking given instructions.
13061 'N_EQK' means the type must be the same as (or based on in some way) the key
13062 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13063 set, various other bits can be set as well in order to modify the meaning of
13064 the type constraint. */
13066 enum neon_type_mask
13090 N_KEY = 0x1000000, /* Key element (main type specifier). */
13091 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
13092 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
13093 N_UNT = 0x8000000, /* Must be explicitly untyped. */
13094 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
13095 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
13096 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13097 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13098 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13099 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
13100 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13102 N_MAX_NONSPECIAL = N_P64
/* Convenience unions of neon_type_mask bits.  N_ALLMODS collects every
   modifier that may accompany N_EQK; the N_SU_* / N_I_* / N_*F_32
   masks name the type/size combinations commonly accepted by groups of
   instructions (S = signed, U = unsigned, I = sign-insensitive integer,
   F = float; the numbers are element sizes in bits).  */
13105 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13107 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13108 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13109 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13110 #define N_SUF_32 (N_SU_32 | N_F32)
13111 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13112 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
13114 /* Pass this as the first type argument to neon_check_type to ignore types
13116 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13118 /* Select a "shape" for the current instruction (describing register types or
13119 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13120 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13121 function of operand parsing, so this function doesn't need to be called.
13122 Shapes should be listed in order of decreasing length. */
13124 static enum neon_shape
13125 neon_select_shape (enum neon_shape shape, ...)
13128 enum neon_shape first_shape = shape;
13130 /* Fix missing optional operands. FIXME: we don't know at this point how
13131 many arguments we should have, so this makes the assumption that we have
13132 > 1. This is true of all current Neon opcodes, I think, but may not be
13133 true in the future. */
13134 if (!inst.operands[1].present)
13135 inst.operands[1] = inst.operands[0];
13137 va_start (ap, shape);
13139 for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
13144 for (j = 0; j < neon_shape_tab[shape].els; j++)
13146 if (!inst.operands[j].present)
13152 switch (neon_shape_tab[shape].el[j])
13155 if (!(inst.operands[j].isreg
13156 && inst.operands[j].isvec
13157 && inst.operands[j].issingle
13158 && !inst.operands[j].isquad))
13163 if (!(inst.operands[j].isreg
13164 && inst.operands[j].isvec
13165 && !inst.operands[j].isquad
13166 && !inst.operands[j].issingle))
13171 if (!(inst.operands[j].isreg
13172 && !inst.operands[j].isvec))
13177 if (!(inst.operands[j].isreg
13178 && inst.operands[j].isvec
13179 && inst.operands[j].isquad
13180 && !inst.operands[j].issingle))
13185 if (!(!inst.operands[j].isreg
13186 && !inst.operands[j].isscalar))
13191 if (!(!inst.operands[j].isreg
13192 && inst.operands[j].isscalar))
13202 if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
13203 /* We've matched all the entries in the shape table, and we don't
13204 have any left over operands which have not been matched. */
13210 if (shape == NS_NULL && first_shape != NS_NULL)
13211 first_error (_("invalid instruction shape"));
13216 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13217 means the Q bit should be set). */
13220 neon_quad (enum neon_shape shape)
13222 return neon_shape_class[shape] == SC_QUAD;
13226 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13229 /* Allow modification to be made to types which are constrained to be
13230 based on the key element, based on bits set alongside N_EQK. */
13231 if ((typebits & N_EQK) != 0)
13233 if ((typebits & N_HLF) != 0)
13235 else if ((typebits & N_DBL) != 0)
13237 if ((typebits & N_SGN) != 0)
13238 *g_type = NT_signed;
13239 else if ((typebits & N_UNS) != 0)
13240 *g_type = NT_unsigned;
13241 else if ((typebits & N_INT) != 0)
13242 *g_type = NT_integer;
13243 else if ((typebits & N_FLT) != 0)
13244 *g_type = NT_float;
13245 else if ((typebits & N_SIZ) != 0)
13246 *g_type = NT_untyped;
13250 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13251 operand type, i.e. the single type specified in a Neon instruction when it
13252 is the only one given. */
13254 static struct neon_type_el
13255 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13257 struct neon_type_el dest = *key;
13259 gas_assert ((thisarg & N_EQK) != 0);
13261 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13266 /* Convert Neon type and size into compact bitmask representation. */
13268 static enum neon_type_mask
13269 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13276 case 8: return N_8;
13277 case 16: return N_16;
13278 case 32: return N_32;
13279 case 64: return N_64;
13287 case 8: return N_I8;
13288 case 16: return N_I16;
13289 case 32: return N_I32;
13290 case 64: return N_I64;
13298 case 16: return N_F16;
13299 case 32: return N_F32;
13300 case 64: return N_F64;
13308 case 8: return N_P8;
13309 case 16: return N_P16;
13310 case 64: return N_P64;
13318 case 8: return N_S8;
13319 case 16: return N_S16;
13320 case 32: return N_S32;
13321 case 64: return N_S64;
13329 case 8: return N_U8;
13330 case 16: return N_U16;
13331 case 32: return N_U32;
13332 case 64: return N_U64;
13343 /* Convert compact Neon bitmask type representation to a type and size. Only
13344 handles the case where a single bit is set in the mask. */
13347 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13348 enum neon_type_mask mask)
13350 if ((mask & N_EQK) != 0)
13353 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13355 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13357 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13359 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13364 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13366 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13367 *type = NT_unsigned;
13368 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13369 *type = NT_integer;
13370 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13371 *type = NT_untyped;
13372 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13374 else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
13382 /* Modify a bitmask of allowed types. This is only needed for type
13386 modify_types_allowed (unsigned allowed, unsigned mods)
13389 enum neon_el_type type;
13395 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13397 if (el_type_of_type_chk (&type, &size,
13398 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13400 neon_modify_type_size (mods, &type, &size);
13401 destmask |= type_chk_of_el_type (type, size);
13408 /* Check type and return type classification.
13409 The manual states (paraphrase): If one datatype is given, it indicates the
13411 - the second operand, if there is one
13412 - the operand, if there is no second operand
13413 - the result, if there are no operands.
13414 This isn't quite good enough though, so we use a concept of a "key" datatype
13415 which is set on a per-instruction basis, which is the one which matters when
13416 only one data type is written.
13417 Note: this function has side-effects (e.g. filling in missing operands). All
13418 Neon instructions should call it before performing bit encoding. */
13420 static struct neon_type_el
13421 neon_check_type (unsigned els, enum neon_shape ns, ...)
13424 unsigned i, pass, key_el = 0;
13425 unsigned types[NEON_MAX_TYPE_ELS];
13426 enum neon_el_type k_type = NT_invtype;
13427 unsigned k_size = -1u;
13428 struct neon_type_el badtype = {NT_invtype, -1};
13429 unsigned key_allowed = 0;
13431 /* Optional registers in Neon instructions are always (not) in operand 1.
13432 Fill in the missing operand here, if it was omitted. */
13433 if (els > 1 && !inst.operands[1].present)
13434 inst.operands[1] = inst.operands[0];
13436 /* Suck up all the varargs. */
13438 for (i = 0; i < els; i++)
13440 unsigned thisarg = va_arg (ap, unsigned);
13441 if (thisarg == N_IGNORE_TYPE)
13446 types[i] = thisarg;
13447 if ((thisarg & N_KEY) != 0)
13452 if (inst.vectype.elems > 0)
13453 for (i = 0; i < els; i++)
13454 if (inst.operands[i].vectype.type != NT_invtype)
13456 first_error (_("types specified in both the mnemonic and operands"));
13460 /* Duplicate inst.vectype elements here as necessary.
13461 FIXME: No idea if this is exactly the same as the ARM assembler,
13462 particularly when an insn takes one register and one non-register
13464 if (inst.vectype.elems == 1 && els > 1)
13467 inst.vectype.elems = els;
13468 inst.vectype.el[key_el] = inst.vectype.el[0];
13469 for (j = 0; j < els; j++)
13471 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13474 else if (inst.vectype.elems == 0 && els > 0)
13477 /* No types were given after the mnemonic, so look for types specified
13478 after each operand. We allow some flexibility here; as long as the
13479 "key" operand has a type, we can infer the others. */
13480 for (j = 0; j < els; j++)
13481 if (inst.operands[j].vectype.type != NT_invtype)
13482 inst.vectype.el[j] = inst.operands[j].vectype;
13484 if (inst.operands[key_el].vectype.type != NT_invtype)
13486 for (j = 0; j < els; j++)
13487 if (inst.operands[j].vectype.type == NT_invtype)
13488 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13493 first_error (_("operand types can't be inferred"));
13497 else if (inst.vectype.elems != els)
13499 first_error (_("type specifier has the wrong number of parts"));
13503 for (pass = 0; pass < 2; pass++)
13505 for (i = 0; i < els; i++)
13507 unsigned thisarg = types[i];
13508 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
13509 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
13510 enum neon_el_type g_type = inst.vectype.el[i].type;
13511 unsigned g_size = inst.vectype.el[i].size;
13513 /* Decay more-specific signed & unsigned types to sign-insensitive
13514 integer types if sign-specific variants are unavailable. */
13515 if ((g_type == NT_signed || g_type == NT_unsigned)
13516 && (types_allowed & N_SU_ALL) == 0)
13517 g_type = NT_integer;
13519 /* If only untyped args are allowed, decay any more specific types to
13520 them. Some instructions only care about signs for some element
13521 sizes, so handle that properly. */
13522 if (((types_allowed & N_UNT) == 0)
13523 && ((g_size == 8 && (types_allowed & N_8) != 0)
13524 || (g_size == 16 && (types_allowed & N_16) != 0)
13525 || (g_size == 32 && (types_allowed & N_32) != 0)
13526 || (g_size == 64 && (types_allowed & N_64) != 0)))
13527 g_type = NT_untyped;
13531 if ((thisarg & N_KEY) != 0)
13535 key_allowed = thisarg & ~N_KEY;
13540 if ((thisarg & N_VFP) != 0)
13542 enum neon_shape_el regshape;
13543 unsigned regwidth, match;
13545 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13548 first_error (_("invalid instruction shape"));
13551 regshape = neon_shape_tab[ns].el[i];
13552 regwidth = neon_shape_el_size[regshape];
13554 /* In VFP mode, operands must match register widths. If we
13555 have a key operand, use its width, else use the width of
13556 the current operand. */
13562 if (regwidth != match)
13564 first_error (_("operand size must match register width"));
13569 if ((thisarg & N_EQK) == 0)
13571 unsigned given_type = type_chk_of_el_type (g_type, g_size);
13573 if ((given_type & types_allowed) == 0)
13575 first_error (_("bad type in Neon instruction"));
13581 enum neon_el_type mod_k_type = k_type;
13582 unsigned mod_k_size = k_size;
13583 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
13584 if (g_type != mod_k_type || g_size != mod_k_size)
13586 first_error (_("inconsistent types in Neon instruction"));
13594 return inst.vectype.el[key_el];
13597 /* Neon-style VFP instruction forwarding. */
13599 /* Thumb VFP instructions have 0xE in the condition field. */
13602 do_vfp_cond_or_thumb (void)
13607 inst.instruction |= 0xe0000000;
13609 inst.instruction |= inst.cond << 28;
13612 /* Look up and encode a simple mnemonic, for use as a helper function for the
13613 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13614 etc. It is assumed that operand parsing has already been done, and that the
13615 operands are in the form expected by the given opcode (this isn't necessarily
13616 the same as the form in which they were parsed, hence some massaging must
13617 take place before this function is called).
13618 Checks current arch version against that in the looked-up opcode. */
13621 do_vfp_nsyn_opcode (const char *opname)
13623 const struct asm_opcode *opcode;
13625 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13630 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13631 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13638 inst.instruction = opcode->tvalue;
13639 opcode->tencode ();
13643 inst.instruction = (inst.cond << 28) | opcode->avalue;
13644 opcode->aencode ();
13649 do_vfp_nsyn_add_sub (enum neon_shape rs)
13651 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13656 do_vfp_nsyn_opcode ("fadds");
13658 do_vfp_nsyn_opcode ("fsubs");
13663 do_vfp_nsyn_opcode ("faddd");
13665 do_vfp_nsyn_opcode ("fsubd");
13669 /* Check operand types to see if this is a VFP instruction, and if so call
13673 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13675 enum neon_shape rs;
13676 struct neon_type_el et;
13681 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13682 et = neon_check_type (2, rs,
13683 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13687 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13688 et = neon_check_type (3, rs,
13689 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13696 if (et.type != NT_invtype)
13707 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13709 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13714 do_vfp_nsyn_opcode ("fmacs");
13716 do_vfp_nsyn_opcode ("fnmacs");
13721 do_vfp_nsyn_opcode ("fmacd");
13723 do_vfp_nsyn_opcode ("fnmacd");
13728 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13730 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13735 do_vfp_nsyn_opcode ("ffmas");
13737 do_vfp_nsyn_opcode ("ffnmas");
13742 do_vfp_nsyn_opcode ("ffmad");
13744 do_vfp_nsyn_opcode ("ffnmad");
13749 do_vfp_nsyn_mul (enum neon_shape rs)
13752 do_vfp_nsyn_opcode ("fmuls");
13754 do_vfp_nsyn_opcode ("fmuld");
13758 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13760 int is_neg = (inst.instruction & 0x80) != 0;
13761 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13766 do_vfp_nsyn_opcode ("fnegs");
13768 do_vfp_nsyn_opcode ("fabss");
13773 do_vfp_nsyn_opcode ("fnegd");
13775 do_vfp_nsyn_opcode ("fabsd");
13779 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13780 insns belong to Neon, and are handled elsewhere. */
13783 do_vfp_nsyn_ldm_stm (int is_dbmode)
13785 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13789 do_vfp_nsyn_opcode ("fldmdbs");
13791 do_vfp_nsyn_opcode ("fldmias");
13796 do_vfp_nsyn_opcode ("fstmdbs");
13798 do_vfp_nsyn_opcode ("fstmias");
13803 do_vfp_nsyn_sqrt (void)
13805 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13806 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13809 do_vfp_nsyn_opcode ("fsqrts");
13811 do_vfp_nsyn_opcode ("fsqrtd");
13815 do_vfp_nsyn_div (void)
13817 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13818 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13819 N_F32 | N_F64 | N_KEY | N_VFP);
13822 do_vfp_nsyn_opcode ("fdivs");
13824 do_vfp_nsyn_opcode ("fdivd");
13828 do_vfp_nsyn_nmul (void)
13830 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13831 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13832 N_F32 | N_F64 | N_KEY | N_VFP);
13836 NEON_ENCODE (SINGLE, inst);
13837 do_vfp_sp_dyadic ();
13841 NEON_ENCODE (DOUBLE, inst);
13842 do_vfp_dp_rd_rn_rm ();
13844 do_vfp_cond_or_thumb ();
13848 do_vfp_nsyn_cmp (void)
13850 if (inst.operands[1].isreg)
13852 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13853 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13857 NEON_ENCODE (SINGLE, inst);
13858 do_vfp_sp_monadic ();
13862 NEON_ENCODE (DOUBLE, inst);
13863 do_vfp_dp_rd_rm ();
13868 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13869 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13871 switch (inst.instruction & 0x0fffffff)
13874 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13877 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13885 NEON_ENCODE (SINGLE, inst);
13886 do_vfp_sp_compare_z ();
13890 NEON_ENCODE (DOUBLE, inst);
13894 do_vfp_cond_or_thumb ();
13898 nsyn_insert_sp (void)
13900 inst.operands[1] = inst.operands[0];
13901 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13902 inst.operands[0].reg = REG_SP;
13903 inst.operands[0].isreg = 1;
13904 inst.operands[0].writeback = 1;
13905 inst.operands[0].present = 1;
13909 do_vfp_nsyn_push (void)
13912 if (inst.operands[1].issingle)
13913 do_vfp_nsyn_opcode ("fstmdbs");
13915 do_vfp_nsyn_opcode ("fstmdbd");
13919 do_vfp_nsyn_pop (void)
13922 if (inst.operands[1].issingle)
13923 do_vfp_nsyn_opcode ("fldmias");
13925 do_vfp_nsyn_opcode ("fldmiad");
13928 /* Fix up Neon data-processing instructions, ORing in the correct bits for
13929 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
13932 neon_dp_fixup (struct arm_it* insn)
13934 unsigned int i = insn->instruction;
13939 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
13950 insn->instruction = i;
13953 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
13957 neon_logbits (unsigned x)
13959 return ffs (x) - 4;
/* Split a 5-bit Neon register number into the two fields the encodings
   use: LOW4 yields the low four bits and HI1 the fifth (high) bit,
   which is placed in a separate D/N/M bit of the instruction (see the
   neon_three_same / neon_two_same encoders below).  */
13962 #define LOW4(R) ((R) & 0xf)
13963 #define HI1(R) (((R) >> 4) & 1)
13965 /* Encode insns with bit pattern:
13967 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13968 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
13970 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13971 different meaning for some instruction. */
13974 neon_three_same (int isquad, int ubit, int size)
13976 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13977 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13978 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13979 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13980 inst.instruction |= LOW4 (inst.operands[2].reg);
13981 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13982 inst.instruction |= (isquad != 0) << 6;
13983 inst.instruction |= (ubit != 0) << 24;
13985 inst.instruction |= neon_logbits (size) << 20;
13987 neon_dp_fixup (&inst);
13990 /* Encode instructions of the form:
13992 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
13993 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
13995 Don't write size if SIZE == -1. */
13998 neon_two_same (int qbit, int ubit, int size)
14000 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14001 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14002 inst.instruction |= LOW4 (inst.operands[1].reg);
14003 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14004 inst.instruction |= (qbit != 0) << 6;
14005 inst.instruction |= (ubit != 0) << 24;
14008 inst.instruction |= neon_logbits (size) << 18;
14010 neon_dp_fixup (&inst);
14013 /* Neon instruction encoders, in approximate order of appearance. */
14016 do_neon_dyadic_i_su (void)
14018 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14019 struct neon_type_el et = neon_check_type (3, rs,
14020 N_EQK, N_EQK, N_SU_32 | N_KEY);
14021 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14025 do_neon_dyadic_i64_su (void)
14027 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14028 struct neon_type_el et = neon_check_type (3, rs,
14029 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14030 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14034 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14037 unsigned size = et.size >> 3;
14038 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14039 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14040 inst.instruction |= LOW4 (inst.operands[1].reg);
14041 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14042 inst.instruction |= (isquad != 0) << 6;
14043 inst.instruction |= immbits << 16;
14044 inst.instruction |= (size >> 3) << 7;
14045 inst.instruction |= (size & 0x7) << 19;
14047 inst.instruction |= (uval != 0) << 24;
14049 neon_dp_fixup (&inst);
14053 do_neon_shl_imm (void)
14055 if (!inst.operands[2].isreg)
14057 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14058 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14059 NEON_ENCODE (IMMED, inst);
14060 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
14064 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14065 struct neon_type_el et = neon_check_type (3, rs,
14066 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14069 /* VSHL/VQSHL 3-register variants have syntax such as:
14071 whereas other 3-register operations encoded by neon_three_same have
14074 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14076 tmp = inst.operands[2].reg;
14077 inst.operands[2].reg = inst.operands[1].reg;
14078 inst.operands[1].reg = tmp;
14079 NEON_ENCODE (INTEGER, inst);
14080 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
/* VQSHL: like do_neon_shl_imm but saturating; the immediate form writes
   the U bit from the element signedness, and the register form swaps
   Dn/Dm for the same encoding-order reason as VSHL.  */
14085 do_neon_qshl_imm (void)
14087 if (!inst.operands[2].isreg)
14089 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14090 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14092 NEON_ENCODE (IMMED, inst);
14093 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
14094 inst.operands[2].imm);
14098 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14099 struct neon_type_el et = neon_check_type (3, rs,
14100 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14103 /* See note in do_neon_shl_imm. */
14104 tmp = inst.operands[2].reg;
14105 inst.operands[2].reg = inst.operands[1].reg;
14106 inst.operands[1].reg = tmp;
14107 NEON_ENCODE (INTEGER, inst);
14108 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
/* VRSHL (rounding shift left, register form only).  Operands 1 and 2 are
   swapped for the same Dn/Dm encoding-order reason as VSHL.  */
14113 do_neon_rshl (void)
14115 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14116 struct neon_type_el et = neon_check_type (3, rs,
14117 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14120 tmp = inst.operands[2].reg;
14121 inst.operands[2].reg = inst.operands[1].reg;
14122 inst.operands[1].reg = tmp;
14123 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
/* Compute the "cmode" field for a logic-immediate instruction (VBIC/VORR
   immediate forms) and store the 8-bit payload in *IMMBITS.  SIZE selects
   which byte positions of a 16/32-bit element the immediate may occupy;
   an immediate that fits none of the allowed positions is rejected via
   first_error.  */
14127 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14129 /* Handle .I8 pseudo-instructions. */
14132 /* Unfortunately, this will make everything apart from zero out-of-range.
14133 FIXME is this the intended semantics? There doesn't seem much point in
14134 accepting .I8 if so. */
14135 immediate |= immediate << 8;
/* 32-bit elements: immediate must be a single byte in one of the four
   byte lanes; the returned cmode encodes which lane.  */
14141 if (immediate == (immediate & 0x000000ff))
14143 *immbits = immediate;
14146 else if (immediate == (immediate & 0x0000ff00))
14148 *immbits = immediate >> 8;
14151 else if (immediate == (immediate & 0x00ff0000))
14153 *immbits = immediate >> 16;
14156 else if (immediate == (immediate & 0xff000000))
14158 *immbits = immediate >> 24;
/* 16-bit elements: both halves of a 32-bit value must repeat.  */
14161 if ((immediate & 0xffff) != (immediate >> 16))
14162 goto bad_immediate;
14163 immediate &= 0xffff;
14166 if (immediate == (immediate & 0x000000ff))
14168 *immbits = immediate;
14171 else if (immediate == (immediate & 0x0000ff00))
14173 *immbits = immediate >> 8;
14178 first_error (_("immediate value out of range"));
/* VAND/VBIC/VORR/VORN/VEOR and friends.  Register forms are untyped
   three-same encodings; immediate forms (2- or 3-operand syntax) pick a
   cmode via neon_cmode_for_logic_imm, inverting the immediate first for
   the VBIC/VORR pseudo-instruction variants.  */
14183 do_neon_logic (void)
14185 if (inst.operands[2].present && inst.operands[2].isreg)
14187 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14188 neon_check_type (3, rs, N_IGNORE_TYPE);
14189 /* U bit and size field were set as part of the bitmask. */
14190 NEON_ENCODE (INTEGER, inst);
14191 neon_three_same (neon_quad (rs), 0, -1);
/* Immediate forms: "VOP Dd, #imm" or "VOP Dd, Dd, #imm".  */
14195 const int three_ops_form = (inst.operands[2].present
14196 && !inst.operands[2].isreg);
14197 const int immoperand = (three_ops_form ? 2 : 1);
14198 enum neon_shape rs = (three_ops_form
14199 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
14200 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
14201 struct neon_type_el et = neon_check_type (2, rs,
14202 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14203 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
14207 if (et.type == NT_invtype)
/* The 3-operand immediate syntax is destructive: Dd and Dn must match.  */
14210 if (three_ops_form)
14211 constraint (inst.operands[0].reg != inst.operands[1].reg,
14212 _("first and second operands shall be the same register"));
14214 NEON_ENCODE (IMMED, inst);
14216 immbits = inst.operands[immoperand].imm;
14219 /* .i64 is a pseudo-op, so the immediate must be a repeating
14221 if (immbits != (inst.operands[immoperand].regisimm ?
14222 inst.operands[immoperand].reg : 0))
14224 /* Set immbits to an invalid constant. */
14225 immbits = 0xdeadbeef;
14232 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14236 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14240 /* Pseudo-instruction for VBIC. */
14241 neon_invert_size (&immbits, 0, et.size);
14242 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14246 /* Pseudo-instruction for VORR. */
14247 neon_invert_size (&immbits, 0, et.size);
14248 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14258 inst.instruction |= neon_quad (rs) << 6;
14259 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14260 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14261 inst.instruction |= cmode << 8;
14262 neon_write_immbits (immbits);
14264 neon_dp_fixup (&inst);
/* VBIF/VBIT/VBSL: untyped three-same bitfield selects; size/U come from
   the opcode bitmask, so -1/0 are passed to neon_three_same.  */
14269 do_neon_bitfield (void)
14271 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14272 neon_check_type (3, rs, N_IGNORE_TYPE);
14273 neon_three_same (neon_quad (rs), 0, -1);
/* Shared encoder for dyadic ops that have both float and integer forms.
   TYPES restricts the accepted element types; UBIT_MEANING is the element
   type (e.g. NT_unsigned) that causes the U bit to be set in the integer
   encoding; DESTBITS adds extra type requirements to the destination.  */
14277 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14280 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14281 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14283 if (et.type == NT_float)
14285 NEON_ENCODE (FLOAT, inst);
14286 neon_three_same (neon_quad (rs), 0, -1);
14290 NEON_ENCODE (INTEGER, inst);
14291 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
/* Thin wrappers over neon_dyadic_misc for the common type sets.  */
14296 do_neon_dyadic_if_su (void)
14298 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14302 do_neon_dyadic_if_su_d (void)
14304 /* This version only allow D registers, but that constraint is enforced during
14305 operand parsing so we don't need to do anything extra here. */
14306 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14310 do_neon_dyadic_if_i_d (void)
14312 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14313 affected if we specify unsigned args. */
14314 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
/* Bit flags for vfp_or_neon_is_neon selecting which checks to perform:
   condition-code legality, Neon-v1 availability, ARMv8 Neon availability.  */
14317 enum vfp_or_neon_is_neon_bits
14320 NEON_CHECK_ARCH = 2,
14321 NEON_CHECK_ARCH8 = 4
14324 /* Call this function if an instruction which may have belonged to the VFP or
14325 Neon instruction sets, but turned out to be a Neon instruction (due to the
14326 operand types involved, etc.). We have to check and/or fix-up a couple of
14329 - Make sure the user hasn't attempted to make a Neon instruction
14331 - Alter the value in the condition code field if necessary.
14332 - Make sure that the arch supports Neon instructions.
14334 Which of these operations take place depends on bits from enum
14335 vfp_or_neon_is_neon_bits.
14337 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14338 current instruction's condition is COND_ALWAYS, the condition field is
14339 changed to inst.uncond_value. This is necessary because instructions shared
14340 between VFP and Neon may be conditional for the VFP variants only, and the
14341 unconditional Neon version must have, e.g., 0xF in the condition field. */
/* Perform the checks selected by CHECK (see enum vfp_or_neon_is_neon_bits
   and the commentary above).  Returns FAIL after calling first_error when
   a check does not pass; may rewrite the condition field as a side
   effect.  */
14344 vfp_or_neon_is_neon (unsigned check)
14346 /* Conditions are always legal in Thumb mode (IT blocks). */
14347 if (!thumb_mode && (check & NEON_CHECK_CC))
14349 if (inst.cond != COND_ALWAYS)
14351 first_error (_(BAD_COND));
/* Force the unconditional (0xF) condition field where required.  */
14354 if (inst.uncond_value != -1)
14355 inst.instruction |= inst.uncond_value << 28;
14358 if ((check & NEON_CHECK_ARCH)
14359 && !mark_feature_used (&fpu_neon_ext_v1))
14361 first_error (_(BAD_FPU));
14365 if ((check & NEON_CHECK_ARCH8)
14366 && !mark_feature_used (&fpu_neon_ext_armv8))
14368 first_error (_(BAD_FPU));
/* VADD/VSUB: try the VFP form first; otherwise encode as Neon with
   integer or float element types (including 64-bit integers).  */
14376 do_neon_addsub_if_i (void)
14378 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14381 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14384 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14385 affected if we specify unsigned args. */
14386 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14389 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14391 V<op> A,B (A is operand 0, B is operand 2)
14396 so handle that case specially. */
/* Swap operands so that inverted comparisons can reuse the non-inverted
   encodings (see the commentary above).
   NOTE(review): uses alloca for the swap scratch; a fixed-size local of
   sizeof (inst.operands[0]) would avoid the non-standard call.  */
14399 neon_exchange_operands (void)
14401 void *scratch = alloca (sizeof (inst.operands[0]));
14402 if (inst.operands[1].present)
14404 /* Swap operands[1] and operands[2]. */
14405 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14406 inst.operands[1] = inst.operands[2];
14407 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
/* Optional first operand omitted: shuffle so operand 0 doubles as Dm.  */
14411 inst.operands[1] = inst.operands[2];
14412 inst.operands[2] = inst.operands[0];
/* Encode a Neon comparison.  Register forms go through neon_dyadic_misc
   (after an operand swap when INVERT is set); immediate forms are
   compare-against-zero encodings.  REGTYPES/IMMTYPES give the permitted
   element types for each form.  */
14417 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14419 if (inst.operands[2].isreg)
14422 neon_exchange_operands ();
14423 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14427 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14428 struct neon_type_el et = neon_check_type (2, rs,
14429 N_EQK | N_SIZ, immtypes | N_KEY);
14431 NEON_ENCODE (IMMED, inst);
14432 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14433 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14434 inst.instruction |= LOW4 (inst.operands[1].reg);
14435 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14436 inst.instruction |= neon_quad (rs) << 6;
14437 inst.instruction |= (et.type == NT_float) << 10;
14438 inst.instruction |= neon_logbits (et.size) << 18;
14440 neon_dp_fixup (&inst);
/* Comparison-mnemonic wrappers (function headers elided in this listing):
   the first and last calls belong to the VCGE/VCEQ-style handlers;
   do_neon_cmp_inv handles the inverted variants by passing TRUE.  */
14447 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
14451 do_neon_cmp_inv (void)
14453 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
14459 neon_compare (N_IF_32, N_IF_32, FALSE);
14462 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14463 scalars, which are encoded in 5 bits, M : Rm.
14464 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14465 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
/* Pack a scalar operand for multiply instructions into the 5-bit M:Rm
   field (see the commentary above): 16-bit scalars allow D0-D7 with
   index 0-3, 32-bit scalars D0-D15 with index 0-1.  Reports an error and
   falls through for anything else.  */
14469 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
14471 unsigned regno = NEON_SCALAR_REG (scalar);
14472 unsigned elno = NEON_SCALAR_INDEX (scalar);
14477 if (regno > 7 || elno > 3)
14479 return regno | (elno << 3);
14482 if (regno > 15 || elno > 1)
14484 return regno | (elno << 4);
14488 first_error (_("scalar out of range for multiply instruction"));
14494 /* Encode multiply / multiply-accumulate scalar instructions. */
/* Encode multiply / multiply-accumulate instructions with a scalar
   operand.  UBIT is written to bit 24; the float bit and size field are
   derived from ET.  */
14497 neon_mul_mac (struct neon_type_el et, int ubit)
14501 /* Give a more helpful error message if we have an invalid type. */
14502 if (et.type == NT_invtype)
14505 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14506 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14507 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14508 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14509 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14510 inst.instruction |= LOW4 (scalar);
14511 inst.instruction |= HI1 (scalar) << 5;
14512 inst.instruction |= (et.type == NT_float) << 8;
14513 inst.instruction |= neon_logbits (et.size) << 20;
14514 inst.instruction |= (ubit != 0) << 24;
14516 neon_dp_fixup (&inst);
/* VMLA/VMLS: try the VFP form, then encode as Neon with either a scalar
   third operand (NEON_ENCODE SCALAR path) or a plain register form.  */
14520 do_neon_mac_maybe_scalar (void)
14522 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14525 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14528 if (inst.operands[2].isscalar)
14530 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14531 struct neon_type_el et = neon_check_type (3, rs,
14532 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14533 NEON_ENCODE (SCALAR, inst);
14534 neon_mul_mac (et, neon_quad (rs));
14538 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14539 affected if we specify unsigned args. */
14540 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
/* VFMA/VFMS: VFP form if possible, otherwise Neon dyadic float/int.  */
14545 do_neon_fmac (void)
14547 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14550 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14553 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
/* Body of the VTST handler (function header elided in this listing):
   untyped 8/16/32-bit three-same encoding.  */
14559 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14560 struct neon_type_el et = neon_check_type (3, rs,
14561 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14562 neon_three_same (neon_quad (rs), 0, et.size);
14565 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14566 same types as the MAC equivalents. The polynomial type for this instruction
14567 is encoded the same as the integer type. */
/* VMUL handler body: VFP form if applicable; scalar operands reuse the
   MAC-with-scalar path; register forms allow the P8 polynomial type (see
   the commentary above).  */
14572 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14575 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14578 if (inst.operands[2].isscalar)
14579 do_neon_mac_maybe_scalar ();
14581 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
/* VQDMULH/VQRDMULH: signed 16/32-bit only, with scalar and register
   forms; rounding is selected by the opcode bitmask, not here.  */
14585 do_neon_qdmulh (void)
14587 if (inst.operands[2].isscalar)
14589 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14590 struct neon_type_el et = neon_check_type (3, rs,
14591 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14592 NEON_ENCODE (SCALAR, inst);
14593 neon_mul_mac (et, neon_quad (rs));
14597 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14598 struct neon_type_el et = neon_check_type (3, rs,
14599 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14600 NEON_ENCODE (INTEGER, inst);
14601 /* The U bit (rounding) comes from bit mask. */
14602 neon_three_same (neon_quad (rs), 0, et.size);
/* VACGE/VACGT (absolute compares, F32 only), their inverted variants
   (encoded by swapping operands), and VRECPS/VRSQRTS-style step ops.  */
14607 do_neon_fcmp_absolute (void)
14609 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14610 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14611 /* Size field comes from bit mask. */
14612 neon_three_same (neon_quad (rs), 1, -1);
14616 do_neon_fcmp_absolute_inv (void)
14618 neon_exchange_operands ();
14619 do_neon_fcmp_absolute ();
14623 do_neon_step (void)
14625 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14626 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14627 neon_three_same (neon_quad (rs), 0, -1);
/* VABS/VNEG: VFP form if applicable, otherwise a two-register Neon
   encoding with signed integer or F32 element types.  */
14631 do_neon_abs_neg (void)
14633 enum neon_shape rs;
14634 struct neon_type_el et;
14636 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14639 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14642 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14643 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14645 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14646 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14647 inst.instruction |= LOW4 (inst.operands[1].reg);
14648 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14649 inst.instruction |= neon_quad (rs) << 6;
14650 inst.instruction |= (et.type == NT_float) << 10;
14651 inst.instruction |= neon_logbits (et.size) << 18;
14653 neon_dp_fixup (&inst);
/* VSLI body (function header elided in this listing): shift-left insert;
   the immediate must be strictly less than the element size and is
   encoded directly.  */
14659 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14660 struct neon_type_el et = neon_check_type (2, rs,
14661 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14662 int imm = inst.operands[2].imm;
14663 constraint (imm < 0 || (unsigned)imm >= et.size,
14664 _("immediate out of range for insert"));
14665 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
/* VSRI body (function header elided in this listing): shift-right insert;
   the immediate runs 1..size and is encoded as size - imm.  */
14671 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14672 struct neon_type_el et = neon_check_type (2, rs,
14673 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14674 int imm = inst.operands[2].imm;
14675 constraint (imm < 1 || (unsigned)imm > et.size,
14676 _("immediate out of range for insert"));
14677 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
/* VQSHLU (saturating shift left, unsigned result of signed input).  */
14681 do_neon_qshlu_imm (void)
14683 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14684 struct neon_type_el et = neon_check_type (2, rs,
14685 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14686 int imm = inst.operands[2].imm;
14687 constraint (imm < 0 || (unsigned)imm >= et.size,
14688 _("immediate out of range for shift"));
14689 /* Only encodes the 'U present' variant of the instruction.
14690 In this case, signed types have OP (bit 8) set to 0.
14691 Unsigned types have OP set to 1. */
14692 inst.instruction |= (et.type == NT_unsigned) << 8;
14693 /* The rest of the bits are the same as other immediate shifts. */
14694 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
/* VQMOVN: narrowing saturating move, Q source to D destination; the
   op field (0xc0 vs 0x80) distinguishes unsigned from signed.  */
14698 do_neon_qmovn (void)
14700 struct neon_type_el et = neon_check_type (2, NS_DQ,
14701 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14702 /* Saturating move where operands can be signed or unsigned, and the
14703 destination has the same signedness. */
14704 NEON_ENCODE (INTEGER, inst);
14705 if (et.type == NT_unsigned)
14706 inst.instruction |= 0xc0;
14708 inst.instruction |= 0x80;
14709 neon_two_same (0, 1, et.size / 2);
/* VQMOVUN: narrowing saturating move with unsigned result; operands
   must be signed (enforced by the type mask).  */
14713 do_neon_qmovun (void)
14715 struct neon_type_el et = neon_check_type (2, NS_DQ,
14716 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14717 /* Saturating move with unsigned results. Operands must be signed. */
14718 NEON_ENCODE (INTEGER, inst);
14719 neon_two_same (0, 1, et.size / 2);
/* VQSHRN/VQRSHRN: saturating narrowing right shift.  A zero immediate is
   a pseudo-op for VQMOVN and is re-dispatched through that mnemonic.  */
14723 do_neon_rshift_sat_narrow (void)
14725 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14726 or unsigned. If operands are unsigned, results must also be unsigned. */
14727 struct neon_type_el et = neon_check_type (2, NS_DQI,
14728 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14729 int imm = inst.operands[2].imm;
14730 /* This gets the bounds check, size encoding and immediate bits calculation
14734 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14735 VQMOVN.I<size> <Dd>, <Qm>. */
14738 inst.operands[2].present = 0;
14739 inst.instruction = N_MNEM_vqmovn;
14744 constraint (imm < 1 || (unsigned)imm > et.size,
14745 _("immediate out of range"));
14746 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
/* VQSHRUN/VQRSHRUN: saturating narrowing right shift with unsigned
   result.  Zero immediate is re-dispatched as VQMOVUN.  */
14750 do_neon_rshift_sat_narrow_u (void)
14752 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14753 or unsigned. If operands are unsigned, results must also be unsigned. */
14754 struct neon_type_el et = neon_check_type (2, NS_DQI,
14755 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14756 int imm = inst.operands[2].imm;
14757 /* This gets the bounds check, size encoding and immediate bits calculation
14761 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14762 VQMOVUN.I<size> <Dd>, <Qm>. */
14765 inst.operands[2].present = 0;
14766 inst.instruction = N_MNEM_vqmovun;
14771 constraint (imm < 1 || (unsigned)imm > et.size,
14772 _("immediate out of range"));
14773 /* FIXME: The manual is kind of unclear about what value U should have in
14774 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14776 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
/* VMOVN: plain narrowing move, Q to D, integer types only.  */
14780 do_neon_movn (void)
14782 struct neon_type_el et = neon_check_type (2, NS_DQ,
14783 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14784 NEON_ENCODE (INTEGER, inst);
14785 neon_two_same (0, 1, et.size / 2);
/* VSHRN/VRSHRN: narrowing right shift; a zero immediate is a pseudo-op
   for VMOVN and is re-dispatched through that mnemonic.  */
14789 do_neon_rshift_narrow (void)
14791 struct neon_type_el et = neon_check_type (2, NS_DQI,
14792 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14793 int imm = inst.operands[2].imm;
14794 /* This gets the bounds check, size encoding and immediate bits calculation
14798 /* If immediate is zero then we are a pseudo-instruction for
14799 VMOVN.I<size> <Dd>, <Qm> */
14802 inst.operands[2].present = 0;
14803 inst.instruction = N_MNEM_vmovn;
14808 constraint (imm < 1 || (unsigned)imm > et.size,
14809 _("immediate out of range for narrowing operation"));
14810 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
/* VSHLL: widening shift left.  The maximum-shift variant (imm == size)
   has a dedicated encoding; other shift amounts re-check the type with
   a narrower set and use the generic immediate-shift encoder.  */
14814 do_neon_shll (void)
14816 /* FIXME: Type checking when lengthening. */
14817 struct neon_type_el et = neon_check_type (2, NS_QDI,
14818 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
14819 unsigned imm = inst.operands[2].imm;
14821 if (imm == et.size)
14823 /* Maximum shift variant. */
14824 NEON_ENCODE (INTEGER, inst);
14825 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14826 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14827 inst.instruction |= LOW4 (inst.operands[1].reg);
14828 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14829 inst.instruction |= neon_logbits (et.size) << 18;
14831 neon_dp_fixup (&inst);
14835 /* A more-specific type check for non-max versions. */
14836 et = neon_check_type (2, NS_QDI,
14837 N_EQK | N_DBL, N_SU_32 | N_KEY);
14838 NEON_ENCODE (IMMED, inst);
14839 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
14843 /* Check the various types for the VCVT instruction, and return which version
14844 the current instruction is. */
/* X-macro table of every VCVT conversion flavour.  Each CVT_VAR entry
   gives: enum-name suffix, source/destination type masks, register-class
   flags, and the VFP mnemonics for the bitshift, plain and round-to-zero
   forms (NULL where no such form exists).  The table is expanded below
   to build enum neon_cvt_flavour and the per-flavour mnemonic arrays.  */
14846 #define CVT_FLAVOUR_VAR \
14847 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
14848 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
14849 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
14850 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
14851 /* Half-precision conversions. */ \
14852 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
14853 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
14854 /* VFP instructions. */ \
14855 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
14856 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
14857 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
14858 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
14859 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
14860 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
14861 /* VFP instructions with bitshift. */ \
14862 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
14863 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
14864 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
14865 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
14866 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
14867 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
14868 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
14869 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
14871 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
14872 neon_cvt_flavour_##C,
14874 /* The different types of conversions we can do. */
14875 enum neon_cvt_flavour
14878 neon_cvt_flavour_invalid,
/* Flavours from f32_f64 onward are VFP-only conversions.  */
14879 neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
/* Determine which conversion flavour the current instruction is, by
   trying neon_check_type against each CVT_FLAVOUR_VAR entry in turn and
   returning the first that matches (clearing any type error from the
   failed attempts).  Returns neon_cvt_flavour_invalid if none match.  */
14884 static enum neon_cvt_flavour
14885 get_neon_cvt_flavour (enum neon_shape rs)
14887 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
14888 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
14889 if (et.type != NT_invtype) \
14891 inst.error = NULL; \
14892 return (neon_cvt_flavour_##C); \
14895 struct neon_type_el et;
14896 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
14897 || rs == NS_FF) ? N_VFP : 0;
14898 /* The instruction versions which take an immediate take one register
14899 argument, which is extended to the width of the full register. Thus the
14900 "source" and "destination" registers must have the same width. Hack that
14901 here by making the size equal to the key (wider, in this case) operand. */
14902 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
14906 return neon_cvt_flavour_invalid;
14921 /* Neon-syntax VFP conversions. */
14924 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
14926 const char *opname = 0;
14928 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14930 /* Conversions with immediate bitshift. */
14931 const char *enc[] =
14933 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
14939 if (flavour < (int) ARRAY_SIZE (enc))
14941 opname = enc[flavour];
14942 constraint (inst.operands[0].reg != inst.operands[1].reg,
14943 _("operands 0 and 1 must be the same register"));
14944 inst.operands[1] = inst.operands[2];
14945 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14950 /* Conversions without bitshift. */
14951 const char *enc[] =
14953 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
14959 if (flavour < (int) ARRAY_SIZE (enc))
14960 opname = enc[flavour];
14964 do_vfp_nsyn_opcode (opname);
/* Round-to-zero VFP conversion: pick the ZN mnemonic column for the
   detected flavour and assemble it (silently no-op if the flavour has
   no round-to-zero form).  */
14968 do_vfp_nsyn_cvtz (void)
14970 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
14971 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
14972 const char *enc[] =
14974 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
14980 if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
14981 do_vfp_nsyn_opcode (enc[flavour]);
/* Encode the ARMv8 VCVT{A,N,P,M} VFP forms: sz selects single/double
   source, op the signedness, rm the rounding mode.  Only float-to-int
   flavours are valid.  Must be outside an IT block.  */
14985 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
14986 enum neon_cvt_mode mode)
14991 set_it_insn_type (OUTSIDE_IT_INSN);
14995 case neon_cvt_flavour_s32_f64:
14999 case neon_cvt_flavour_s32_f32:
15003 case neon_cvt_flavour_u32_f64:
15007 case neon_cvt_flavour_u32_f32:
15012 first_error (_("invalid instruction shape"));
15018 case neon_cvt_mode_a: rm = 0; break;
15019 case neon_cvt_mode_n: rm = 1; break;
15020 case neon_cvt_mode_p: rm = 2; break;
15021 case neon_cvt_mode_m: rm = 3; break;
15022 default: first_error (_("invalid rounding mode")); return;
15025 NEON_ENCODE (FPV8, inst);
15026 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15027 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15028 inst.instruction |= sz << 8;
15029 inst.instruction |= op << 7;
15030 inst.instruction |= rm << 16;
/* ARMv8 VFP rounding conversions are unconditional (cond field = 0xF).  */
15031 inst.instruction |= 0xf0000000;
15032 inst.is_neon = TRUE;
/* Central VCVT dispatcher.  Determines the register shape and conversion
   flavour, then routes to: the VFP round-to-zero helper (PR11109), the
   plain/ARMv8 VFP helpers, the Neon fixed-point encoding, the ARMv8
   Neon rounding-mode encoding, the Neon integer encoding, or the
   half-precision VCVT forms, depending on shape/flavour/MODE.  */
15036 do_neon_cvt_1 (enum neon_cvt_mode mode)
15038 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
15039 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
15040 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15042 /* PR11109: Handle round-to-zero for VCVT conversions. */
15043 if (mode == neon_cvt_mode_z
15044 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
15045 && (flavour == neon_cvt_flavour_s32_f32
15046 || flavour == neon_cvt_flavour_u32_f32
15047 || flavour == neon_cvt_flavour_s32_f64
15048 || flavour == neon_cvt_flavour_u32_f64)
15049 && (rs == NS_FD || rs == NS_FF))
15051 do_vfp_nsyn_cvtz ();
15055 /* VFP rather than Neon conversions. */
15056 if (flavour >= neon_cvt_flavour_first_fp)
15058 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15059 do_vfp_nsyn_cvt (rs, flavour);
15061 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
/* Neon fixed-point conversion (shapes with an immediate operand).
   enctab is indexed by flavour; entries are the per-flavour opcode
   bits for s32_f32/u32_f32/f32_s32/f32_u32.  */
15072 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15074 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15077 /* Fixed-point conversion with #0 immediate is encoded as an
15078 integer conversion. */
15079 if (inst.operands[2].present && inst.operands[2].imm == 0)
15081 immbits = 32 - inst.operands[2].imm;
15082 NEON_ENCODE (IMMED, inst);
15083 if (flavour != neon_cvt_flavour_invalid)
15084 inst.instruction |= enctab[flavour];
15085 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15086 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15087 inst.instruction |= LOW4 (inst.operands[1].reg);
15088 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15089 inst.instruction |= neon_quad (rs) << 6;
15090 inst.instruction |= 1 << 21;
15091 inst.instruction |= immbits << 16;
15093 neon_dp_fixup (&inst);
/* Whole-register Neon conversions.  Rounding modes other than x/z use
   the ARMv8 Advanced SIMD VCVT{A,N,P,M} encoding.  */
15099 if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
15101 NEON_ENCODE (FLOAT, inst);
15102 set_it_insn_type (OUTSIDE_IT_INSN);
15104 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
15107 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15108 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15109 inst.instruction |= LOW4 (inst.operands[1].reg);
15110 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15111 inst.instruction |= neon_quad (rs) << 6;
15112 inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
15113 inst.instruction |= mode << 8;
15115 inst.instruction |= 0xfc000000;
15117 inst.instruction |= 0xf0000000;
/* Plain Neon integer <-> float conversion.  */
15123 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
15125 NEON_ENCODE (INTEGER, inst);
15127 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15130 if (flavour != neon_cvt_flavour_invalid)
15131 inst.instruction |= enctab[flavour];
15133 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15134 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15135 inst.instruction |= LOW4 (inst.operands[1].reg);
15136 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15137 inst.instruction |= neon_quad (rs) << 6;
15138 inst.instruction |= 2 << 18;
15140 neon_dp_fixup (&inst);
15145 /* Half-precision conversions for Advanced SIMD -- neon. */
15150 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
15152 as_bad (_("operand size must match register width"));
15157 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
15159 as_bad (_("operand size must match register width"));
15164 inst.instruction = 0x3b60600;
15166 inst.instruction = 0x3b60700;
15168 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15169 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15170 inst.instruction |= LOW4 (inst.operands[1].reg);
15171 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15172 neon_dp_fixup (&inst);
15176 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15177 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15178 do_vfp_nsyn_cvt (rs, flavour);
15180 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
/* Per-mnemonic wrappers: each VCVT rounding variant just forwards the
   corresponding rounding mode to do_neon_cvt_1.  */
15185 do_neon_cvtr (void)
15187 do_neon_cvt_1 (neon_cvt_mode_x);
15193 do_neon_cvt_1 (neon_cvt_mode_z);
15197 do_neon_cvta (void)
15199 do_neon_cvt_1 (neon_cvt_mode_a);
15203 do_neon_cvtn (void)
15205 do_neon_cvt_1 (neon_cvt_mode_n);
15209 do_neon_cvtp (void)
15211 do_neon_cvt_1 (neon_cvt_mode_p);
15215 do_neon_cvtm (void)
15217 do_neon_cvt_1 (neon_cvt_mode_m);
/* Encode VCVTB/VCVTT once direction is known.  T selects top vs bottom
   half (bit 7), TO the conversion direction (bit 16), IS_DOUBLE the
   double-precision form (bit 8) — double forms require ARMv8 VFP.  */
15221 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15224 mark_feature_used (&fpu_vfp_ext_armv8);
15226 encode_arm_vfp_reg (inst.operands[0].reg,
15227 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15228 encode_arm_vfp_reg (inst.operands[1].reg,
15229 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15230 inst.instruction |= to ? 0x10000 : 0;
15231 inst.instruction |= t ? 0x80 : 0;
15232 inst.instruction |= is_double ? 0x100 : 0;
15233 do_vfp_cond_or_thumb ();
/* Classify a VCVTB/VCVTT by operand types (f16<->f32 or f16<->f64,
   each direction) and dispatch to do_neon_cvttb_2; the cvtb/cvtt
   entry points below select bottom vs top half.  */
15237 do_neon_cvttb_1 (bfd_boolean t)
15239 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);
15243 else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
15246 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
15248 else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
15251 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
15253 else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
15256 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
15258 else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
15261 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
15268 do_neon_cvtb (void)
15270 do_neon_cvttb_1 (FALSE);
15275 do_neon_cvtt (void)
15277 do_neon_cvttb_1 (TRUE);
/* Encode the immediate form of VMOV/VMVN: pick a cmode for the value via
   neon_cmode_for_move_imm, retrying with inverted bits (flipping between
   VMOV and VMVN) when the direct encoding does not exist.  */
15281 neon_move_immediate (void)
15283 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
15284 struct neon_type_el et = neon_check_type (2, rs,
15285 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
15286 unsigned immlo, immhi = 0, immbits;
15287 int op, cmode, float_p;
15289 constraint (et.type == NT_invtype,
15290 _("operand size must be specified for immediate VMOV"));
15292 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15293 op = (inst.instruction & (1 << 5)) != 0;
15295 immlo = inst.operands[1].imm;
15296 if (inst.operands[1].regisimm)
15297 immhi = inst.operands[1].reg;
15299 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
15300 _("immediate has bits set outside the operand size"));
15302 float_p = inst.operands[1].immisfloat;
15304 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
15305 et.size, et.type)) == FAIL)
15307 /* Invert relevant bits only. */
15308 neon_invert_size (&immlo, &immhi, et.size);
15309 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15310 with one or the other; those cases are caught by
15311 neon_cmode_for_move_imm. */
15313 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
15314 &op, et.size, et.type)) == FAIL)
15316 first_error (_("immediate out of range"));
/* Write back the possibly-flipped op bit, then the register/cmode/imm.  */
15321 inst.instruction &= ~(1 << 5);
15322 inst.instruction |= op << 5;
15324 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15325 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15326 inst.instruction |= neon_quad (rs) << 6;
15327 inst.instruction |= cmode << 8;
15329 neon_write_immbits (immbits);
/* Body of the VMVN-style handler (function header elided in this
   listing): register form is a two-register encoding, otherwise the
   immediate is handled by neon_move_immediate.  */
15335 if (inst.operands[1].isreg)
15337 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15339 NEON_ENCODE (INTEGER, inst);
15340 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15341 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15342 inst.instruction |= LOW4 (inst.operands[1].reg);
15343 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15344 inst.instruction |= neon_quad (rs) << 6;
15348 NEON_ENCODE (IMMED, inst);
15349 neon_move_immediate ();
15352 neon_dp_fixup (&inst);
15355 /* Encode instructions of form:
15357 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15358 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
/* Common encoder for three-operand long/wide/narrow Neon ops: places
   Rd/Rn/Rm, sets the U bit (24) for unsigned element types and the
   log2(size) field at bits [21:20].  */
15361 neon_mixed_length (struct neon_type_el et, unsigned size)
15363 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15364 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15365 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15366 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15367 inst.instruction |= LOW4 (inst.operands[2].reg);
15368 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15369 inst.instruction |= (et.type == NT_unsigned) << 24;
15370 inst.instruction |= neon_logbits (size) << 20;
15372 neon_dp_fixup (&inst);
/* Encode lengthening dyadic ops (Qd = Dn op Dm), e.g. VADDL/VSUBL:
   result element width is double the source width (N_DBL).  */
15376 do_neon_dyadic_long (void)
15378 /* FIXME: Type checking for lengthening op. */
15379 struct neon_type_el et = neon_check_type (3, NS_QDD,
15380 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15381 neon_mixed_length (et, et.size);
/* Encode VABAL (absolute-difference-and-accumulate long): like
   do_neon_dyadic_long but the destination must be an integer type.  */
15385 do_neon_abal (void)
15387 struct neon_type_el et = neon_check_type (3, NS_QDD,
15388 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15389 neon_mixed_length (et, et.size);
/* Shared encoder for long multiply-accumulate: dispatches between the
   Q,D,scalar form and the Q,D,D register form based on operand 2.
   NOTE(review): the scalar branch consumes REGTYPES and the register
   branch consumes SCALARTYPES — the parameter names look swapped
   relative to their use; verify against the callers before renaming
   (interface must stay as-is).  */
15393 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
15395 if (inst.operands[2].isscalar)
15397 struct neon_type_el et = neon_check_type (3, NS_QDS,
15398 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
15399 NEON_ENCODE (SCALAR, inst);
15400 neon_mul_mac (et, et.type == NT_unsigned);
/* Register (non-scalar) form.  */
15404 struct neon_type_el et = neon_check_type (3, NS_QDD,
15405 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
15406 NEON_ENCODE (INTEGER, inst);
15407 neon_mixed_length (et, et.size);
/* Encode VMLAL/VMLSL-style long MAC; 16/32-bit types for the scalar
   form, any 8/16/32 signed/unsigned type for the register form.  */
15412 do_neon_mac_maybe_scalar_long (void)
15414 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
/* Encode widening dyadic ops (Qd = Qn op Dm), e.g. VADDW/VSUBW.  */
15418 do_neon_dyadic_wide (void)
15420 struct neon_type_el et = neon_check_type (3, NS_QQD,
15421 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15422 neon_mixed_length (et, et.size);
/* Encode narrowing dyadic ops (Dd = Qn op Qm), e.g. VADDHN/VSUBHN.
   The encoded size field describes the narrow (halved) result.  */
15426 do_neon_dyadic_narrow (void)
15428 struct neon_type_el et = neon_check_type (3, NS_QDD,
15429 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
15430 /* Operand sign is unimportant, and the U bit is part of the opcode,
15431 so force the operand type to integer. */
15432 et.type = NT_integer;
15433 neon_mixed_length (et, et.size / 2);
/* Encode VQDMULL/VQDMLAL/VQDMLSL-style saturating long multiplies —
   signed 16/32-bit element types only, for both forms.  */
15437 do_neon_mul_sat_scalar_long (void)
15439 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
/* Encode VMULL.  Scalar second operand defers to the generic long MAC
   path; otherwise integer or polynomial (P8, and P64 with the ARMv8
   crypto extension) register forms are encoded here.  */
15443 do_neon_vmull (void)
15445 if (inst.operands[2].isscalar)
15446 do_neon_mac_maybe_scalar_long ();
15449 struct neon_type_el et = neon_check_type (3, NS_QDD,
15450 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
15452 if (et.type == NT_poly)
15453 NEON_ENCODE (POLY, inst);
15455 NEON_ENCODE (INTEGER, inst);
15457 /* For polynomial encoding the U bit must be zero, and the size must
15458 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
15459 obviously, as 0b10). */
15462 /* Check we're on the correct architecture. */
15463 if (!mark_feature_used (&fpu_crypto_ext_armv8))
/* NOTE(review): the line below is an expression statement with no
   effect — the translated string is computed and discarded.  It was
   presumably meant to be reported via inst.error/as_bad; confirm
   against current binutils before fixing.  */
15465 _("Instruction form not available on this architecture.");
15470 neon_mixed_length (et, et.size);
/* NOTE(review): function header missing from this extract — by shape
   (DDDI/QQQI with a byte-position immediate in bits [11:8]) this is
   presumably the body of do_neon_ext (VEXT); confirm.  The element
   immediate is converted to a byte offset before range checking.  */
15477 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
15478 struct neon_type_el et = neon_check_type (3, rs,
15479 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15480 unsigned imm = (inst.operands[3].imm * et.size) / 8;
/* Byte offset must fit the register width: 8 bytes for D, 16 for Q.  */
15482 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
15483 _("shift out of range"));
15484 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15485 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15486 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15487 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15488 inst.instruction |= LOW4 (inst.operands[2].reg);
15489 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15490 inst.instruction |= neon_quad (rs) << 6;
15491 inst.instruction |= imm << 8;
15493 neon_dp_fixup (&inst);
/* NOTE(review): function header missing — presumably the body of
   do_neon_rev (VREV16/32/64); confirm.  The op field at bits [8:7]
   selects the reversal region width, which must exceed the element
   size or the encoding would be reserved.  */
15499 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15500 struct neon_type_el et = neon_check_type (2, rs,
15501 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15502 unsigned op = (inst.instruction >> 7) & 3;
15503 /* N (width of reversed regions) is encoded as part of the bitmask. We
15504 extract it here to check the elements to be reversed are smaller.
15505 Otherwise we'd get a reserved instruction. */
15506 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
15507 gas_assert (elsize != 0);
15508 constraint (et.size >= elsize,
15509 _("elements must be smaller than reversal region"));
15510 neon_two_same (neon_quad (rs), 1, et.size);
/* NOTE(review): function header missing — presumably the body of
   do_neon_dup (VDUP); confirm.  Two forms: duplicate a scalar lane
   (Neon-only), or broadcast an ARM core register to all lanes.  */
15516 if (inst.operands[1].isscalar)
15518 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
15519 struct neon_type_el et = neon_check_type (2, rs,
15520 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15521 unsigned sizebits = et.size >> 3;
15522 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
15523 int logsize = neon_logbits (et.size);
/* Lane index is shifted to sit above the size marker bit.  */
15524 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
15526 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
15529 NEON_ENCODE (SCALAR, inst);
15530 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15531 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15532 inst.instruction |= LOW4 (dm);
15533 inst.instruction |= HI1 (dm) << 5;
15534 inst.instruction |= neon_quad (rs) << 6;
15535 inst.instruction |= x << 17;
15536 inst.instruction |= sizebits << 16;
15538 neon_dp_fixup (&inst);
/* ARM-register source form (VDUP Dd/Qd, Rm).  */
15542 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
15543 struct neon_type_el et = neon_check_type (2, rs,
15544 N_8 | N_16 | N_32 | N_KEY, N_EQK);
15545 /* Duplicate ARM register to lanes of vector. */
15546 NEON_ENCODE (ARMREG, inst);
/* Size is spread across the B (22) and E (5) bits.  */
15549 case 8: inst.instruction |= 0x400000; break;
15550 case 16: inst.instruction |= 0x000020; break;
15551 case 32: inst.instruction |= 0x000000; break;
15554 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15555 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
15556 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
15557 inst.instruction |= neon_quad (rs) << 21;
15558 /* The encoding for this instruction is identical for the ARM and Thumb
15559 variants, except for the condition field. */
15560 do_vfp_cond_or_thumb ();
15564 /* VMOV has particularly many variations. It can be one of:
15565 0. VMOV<c><q> <Qd>, <Qm>
15566 1. VMOV<c><q> <Dd>, <Dm>
15567 (Register operations, which are VORR with Rm = Rn.)
15568 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15569 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15571 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15572 (ARM register to scalar.)
15573 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15574 (Two ARM registers to vector.)
15575 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15576 (Scalar to ARM register.)
15577 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15578 (Vector to two ARM registers.)
15579 8. VMOV.F32 <Sd>, <Sm>
15580 9. VMOV.F64 <Dd>, <Dm>
15581 (VFP register moves.)
15582 10. VMOV.F32 <Sd>, #imm
15583 11. VMOV.F64 <Dd>, #imm
15584 (VFP float immediate load.)
15585 12. VMOV <Rd>, <Sm>
15586 (VFP single to ARM reg.)
15587 13. VMOV <Sd>, <Rm>
15588 (ARM reg to VFP single.)
15589 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15590 (Two ARM regs to two VFP singles.)
15591 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15592 (Two VFP singles to two ARM regs.)
15594 These cases can be disambiguated using neon_select_shape, except cases 1/9
15595 and 3/11 which depend on the operand type too.
15597 All the encoded bits are hardcoded by this function.
15599 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15600 Cases 5, 7 may be used with VFPv2 and above.
15602 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15603 can specify a type where it doesn't make sense to, and is ignored). */
/* NOTE(review): the do_neon_mov function header is missing from this
   extract; the switch statement lines are also absent.  Code below is
   the sampled body dispatching on the shape selected here.  */
15608 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
15609 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
15611 struct neon_type_el et;
15612 const char *ldconst = 0;
15616 case NS_DD: /* case 1/9. */
15617 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15618 /* It is not an error here if no type is given. */
15620 if (et.type == NT_float && et.size == 64)
15622 do_vfp_nsyn_opcode ("fcpyd");
15625 /* fall through. */
15627 case NS_QQ: /* case 0/1. */
15629 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15631 /* The architecture manual I have doesn't explicitly state which
15632 value the U bit should have for register->register moves, but
15633 the equivalent VORR instruction has U = 0, so do that. */
15634 inst.instruction = 0x0200110;
15635 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15636 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15637 inst.instruction |= LOW4 (inst.operands[1].reg);
15638 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
/* Rm is replicated into Rn: VORR Dd, Dm, Dm acts as a move.  */
15639 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15640 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15641 inst.instruction |= neon_quad (rs) << 6;
15643 neon_dp_fixup (&inst);
15647 case NS_DI: /* case 3/11. */
15648 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15650 if (et.type == NT_float && et.size == 64)
15652 /* case 11 (fconstd). */
15653 ldconst = "fconstd";
15654 goto encode_fconstd;
15656 /* fall through. */
15658 case NS_QI: /* case 2/3. */
15659 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15661 inst.instruction = 0x0800010;
15662 neon_move_immediate ();
15663 neon_dp_fixup (&inst);
15666 case NS_SR: /* case 4. */
15668 unsigned bcdebits = 0;
15670 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
15671 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
15673 /* .<size> is optional here, defaulting to .32. */
15674 if (inst.vectype.elems == 0
15675 && inst.operands[0].vectype.type == NT_invtype
15676 && inst.operands[1].vectype.type == NT_invtype)
15678 inst.vectype.el[0].type = NT_untyped;
15679 inst.vectype.el[0].size = 32;
15680 inst.vectype.elems = 1;
15683 et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
15684 logsize = neon_logbits (et.size);
/* 8/16-bit element transfers need Neon; 32-bit works on plain VFP.  */
15686 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15688 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15689 && et.size != 32, _(BAD_FPU));
15690 constraint (et.type == NT_invtype, _("bad type for scalar"));
15691 constraint (x >= 64 / et.size, _("scalar index out of range"));
/* Size selector goes in the low opc bits of bcdebits.  */
15695 case 8: bcdebits = 0x8; break;
15696 case 16: bcdebits = 0x1; break;
15697 case 32: bcdebits = 0x0; break;
15701 bcdebits |= x << logsize;
15703 inst.instruction = 0xe000b10;
15704 do_vfp_cond_or_thumb ();
15705 inst.instruction |= LOW4 (dn) << 16;
15706 inst.instruction |= HI1 (dn) << 7;
15707 inst.instruction |= inst.operands[1].reg << 12;
15708 inst.instruction |= (bcdebits & 3) << 5;
15709 inst.instruction |= (bcdebits >> 2) << 21;
15713 case NS_DRR: /* case 5 (fmdrr). */
15714 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15717 inst.instruction = 0xc400b10;
15718 do_vfp_cond_or_thumb ();
15719 inst.instruction |= LOW4 (inst.operands[0].reg);
15720 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
15721 inst.instruction |= inst.operands[1].reg << 12;
15722 inst.instruction |= inst.operands[2].reg << 16;
15725 case NS_RS: /* case 6. */
15728 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
15729 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
15730 unsigned abcdebits = 0;
15732 /* .<dt> is optional here, defaulting to .32. */
15733 if (inst.vectype.elems == 0
15734 && inst.operands[0].vectype.type == NT_invtype
15735 && inst.operands[1].vectype.type == NT_invtype)
15737 inst.vectype.el[0].type = NT_untyped;
15738 inst.vectype.el[0].size = 32;
15739 inst.vectype.elems = 1;
15742 et = neon_check_type (2, NS_NULL,
15743 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
15744 logsize = neon_logbits (et.size);
15746 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15748 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15749 && et.size != 32, _(BAD_FPU));
15750 constraint (et.type == NT_invtype, _("bad type for scalar"));
15751 constraint (x >= 64 / et.size, _("scalar index out of range"));
/* Sign/zero-extension choice is encoded alongside the size here.  */
15755 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
15756 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
15757 case 32: abcdebits = 0x00; break;
15761 abcdebits |= x << logsize;
15762 inst.instruction = 0xe100b10;
15763 do_vfp_cond_or_thumb ();
15764 inst.instruction |= LOW4 (dn) << 16;
15765 inst.instruction |= HI1 (dn) << 7;
15766 inst.instruction |= inst.operands[0].reg << 12;
15767 inst.instruction |= (abcdebits & 3) << 5;
15768 inst.instruction |= (abcdebits >> 2) << 21;
15772 case NS_RRD: /* case 7 (fmrrd). */
15773 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15776 inst.instruction = 0xc500b10;
15777 do_vfp_cond_or_thumb ();
15778 inst.instruction |= inst.operands[0].reg << 12;
15779 inst.instruction |= inst.operands[1].reg << 16;
15780 inst.instruction |= LOW4 (inst.operands[2].reg);
15781 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15784 case NS_FF: /* case 8 (fcpys). */
15785 do_vfp_nsyn_opcode ("fcpys");
15788 case NS_FI: /* case 10 (fconsts). */
15789 ldconst = "fconsts";
/* Only "quarter-precision" encodable floats are legal immediates.  */
15791 if (is_quarter_float (inst.operands[1].imm))
15793 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
15794 do_vfp_nsyn_opcode (ldconst);
15797 first_error (_("immediate out of range"));
15800 case NS_RF: /* case 12 (fmrs). */
15801 do_vfp_nsyn_opcode ("fmrs");
15804 case NS_FR: /* case 13 (fmsr). */
15805 do_vfp_nsyn_opcode ("fmsr");
15808 /* The encoders for the fmrrs and fmsrr instructions expect three operands
15809 (one of which is a list), but we have parsed four. Do some fiddling to
15810 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15812 case NS_RRFF: /* case 14 (fmrrs). */
15813 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
15814 _("VFP registers must be adjacent"));
15815 inst.operands[2].imm = 2;
15816 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15817 do_vfp_nsyn_opcode ("fmrrs");
15820 case NS_FFRR: /* case 15 (fmsrr). */
15821 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
15822 _("VFP registers must be adjacent"));
15823 inst.operands[1] = inst.operands[2];
15824 inst.operands[2] = inst.operands[3];
15825 inst.operands[0].imm = 2;
15826 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15827 do_vfp_nsyn_opcode ("fmsrr");
15831 /* neon_select_shape has determined that the instruction
15832 shape is wrong and has already set the error message. */
/* Encode V{R}SHR-style right shift by immediate.  A shift of zero is
   re-encoded as VMOV; otherwise the immediate must lie in
   [1, element size].
   NOTE(review): the call at the end is truncated in this extract —
   its final argument line is missing.  */
15841 do_neon_rshift_round_imm (void)
15843 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15844 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15845 int imm = inst.operands[2].imm;
15847 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15850 inst.operands[2].present = 0;
15855 constraint (imm < 1 || (unsigned)imm > et.size,
15856 _("immediate out of range for shift"));
15857 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
/* Encode VMOVL (lengthening move): element size in bytes is placed in
   bits [21:19], with the U bit taken from the source signedness.  */
15862 do_neon_movl (void)
15864 struct neon_type_el et = neon_check_type (2, NS_QD,
15865 N_EQK | N_DBL, N_SU_32 | N_KEY);
15866 unsigned sizebits = et.size >> 3;
15867 inst.instruction |= sizebits << 19;
15868 neon_two_same (0, et.type == NT_unsigned, -1);
/* NOTE(review): function header missing — presumably the body of
   do_neon_trn (VTRN); confirm.  Plain two-same-shape encoding.  */
15874 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15875 struct neon_type_el et = neon_check_type (2, rs,
15876 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15877 NEON_ENCODE (INTEGER, inst);
15878 neon_two_same (neon_quad (rs), 1, et.size);
/* Encode VZIP/VUZP.  The D-register 32-bit case has no encoding of its
   own, so it is rewritten as VTRN.32 (architecturally equivalent).  */
15882 do_neon_zip_uzp (void)
15884 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15885 struct neon_type_el et = neon_check_type (2, rs,
15886 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15887 if (rs == NS_DD && et.size == 32)
15889 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
15890 inst.instruction = N_MNEM_vtrn;
15894 neon_two_same (neon_quad (rs), 1, et.size);
/* Encode VQABS/VQNEG — saturating ops, signed element types only.  */
15898 do_neon_sat_abs_neg (void)
15900 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15901 struct neon_type_el et = neon_check_type (2, rs,
15902 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15903 neon_two_same (neon_quad (rs), 1, et.size);
/* Encode VPADDL/VPADAL (pairwise add long).  */
15907 do_neon_pair_long (void)
15909 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15910 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15911 /* Unsigned is encoded in OP field (bit 7) for these instructions. */
15912 inst.instruction |= (et.type == NT_unsigned) << 7;
15913 neon_two_same (neon_quad (rs), 1, et.size);
/* Encode VRECPE/VRSQRTE — float vs. unsigned-int variant selected by
   bit 8 from the element type.  */
15917 do_neon_recip_est (void)
15919 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15920 struct neon_type_el et = neon_check_type (2, rs,
15921 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15922 inst.instruction |= (et.type == NT_float) << 8;
15923 neon_two_same (neon_quad (rs), 1, et.size);
/* NOTE(review): four consecutive one-liner handler bodies whose
   headers are missing from this extract.  By original line numbers and
   type masks these are presumably do_neon_cls (signed types),
   do_neon_clz (integer types), do_neon_cnt (8-bit only) and
   do_neon_swp (untyped) — confirm against the full file.  */
15929 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15930 struct neon_type_el et = neon_check_type (2, rs,
15931 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15932 neon_two_same (neon_quad (rs), 1, et.size);
/* Second handler (integer 8/16/32 element types).  */
15938 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15939 struct neon_type_el et = neon_check_type (2, rs,
15940 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15941 neon_two_same (neon_quad (rs), 1, et.size);
/* Third handler (8-bit elements only).  */
15947 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15948 struct neon_type_el et = neon_check_type (2, rs,
15949 N_EQK | N_INT, N_8 | N_KEY);
15950 neon_two_same (neon_quad (rs), 1, et.size);
/* Fourth handler (no element type check; size -1 = untyped).  */
15956 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15957 neon_two_same (neon_quad (rs), 1, -1);
/* Encode VTBL/VTBX table lookups.  The table register list may hold
   one to four D registers; (length - 1) is encoded at bits [9:8].  */
15961 do_neon_tbl_tbx (void)
15963 unsigned listlenbits;
15964 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
15966 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
15968 first_error (_("bad list length for table lookup"));
15972 listlenbits = inst.operands[1].imm - 1;
15973 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15974 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15975 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15976 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15977 inst.instruction |= LOW4 (inst.operands[2].reg);
15978 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15979 inst.instruction |= listlenbits << 8;
15981 neon_dp_fixup (&inst);
/* Encode VLDM/VSTM (and DB variants).  Single-precision lists defer to
   the VFP path; the double-precision offset field counts words, hence
   the register count is doubled.  */
15985 do_neon_ldm_stm (void)
15987 /* P, U and L bits are part of bitmask. */
15988 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
15989 unsigned offsetbits = inst.operands[1].imm * 2;
15991 if (inst.operands[1].issingle)
15993 do_vfp_nsyn_ldm_stm (is_dbmode);
15997 constraint (is_dbmode && !inst.operands[0].writeback,
15998 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16000 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16001 _("register list must contain at least 1 and at most 16 "
16004 inst.instruction |= inst.operands[0].reg << 16;
16005 inst.instruction |= inst.operands[0].writeback << 21;
16006 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16007 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
16009 inst.instruction |= offsetbits;
16011 do_vfp_cond_or_thumb ();
/* Encode VLDR/VSTR via the classic VFP mnemonics.  Warns about (or
   rejects, depending on mode) PC-relative VSTR per ARMv7 rules.
   NOTE(review): the `if` heads and `else` branches around the nsyn
   calls are missing from this sampled extract.  */
16015 do_neon_ldr_str (void)
16017 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16019 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16020 And is UNPREDICTABLE in thumb mode. */
16022 && inst.operands[1].reg == REG_PC
16023 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16026 inst.error = _("Use of PC here is UNPREDICTABLE");
16027 else if (warn_on_deprecated)
16028 as_warn (_("Use of PC here is deprecated"));
/* Dispatch to the single- or double-precision load/store encoder.  */
16031 if (inst.operands[0].issingle)
16034 do_vfp_nsyn_opcode ("flds");
16036 do_vfp_nsyn_opcode ("fsts");
16041 do_vfp_nsyn_opcode ("fldd");
16043 do_vfp_nsyn_opcode ("fstd");
16047 /* "interleave" version also handles non-interleaving register VLD1/VST1
/* Encode the multi-structure (interleaved) VLD<n>/VST<n> forms:
   validates alignment against the register-list length, then looks up
   the "type" field in a table keyed by list style and <n>.  */
16051 do_neon_ld_st_interleave (void)
16053 struct neon_type_el et = neon_check_type (1, NS_NULL,
16054 N_8 | N_16 | N_32 | N_64);
16055 unsigned alignbits = 0;
16057 /* The bits in this table go:
16058 0: register stride of one (0) or two (1)
16059 1,2: register list length, minus one (1, 2, 3, 4).
16060 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16061 We use -1 for invalid entries. */
16062 const int typetable[] =
16064 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16065 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16066 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16067 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16071 if (et.type == NT_invtype)
/* Alignment spec (if any) is carried in the high byte of imm.  */
16074 if (inst.operands[1].immisalign)
16075 switch (inst.operands[1].imm >> 8)
16077 case 64: alignbits = 1; break;
16079 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
16080 && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16081 goto bad_alignment;
16085 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16086 goto bad_alignment;
16091 first_error (_("bad alignment"));
16095 inst.instruction |= alignbits << 4;
16096 inst.instruction |= neon_logbits (et.size) << 6;
16098 /* Bits [4:6] of the immediate in a list specifier encode register stride
16099 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16100 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16101 up the right value for "type" in a table based on this value and the given
16102 list style, then stick it back. */
16103 idx = ((inst.operands[0].imm >> 4) & 7)
16104 | (((inst.instruction >> 8) & 3) << 3);
16106 typebits = typetable[idx];
16108 constraint (typebits == -1, _("bad list type for instruction"));
16109 constraint (((inst.instruction >> 8) & 3) && et.size == 64,
16110 _("bad element type for instruction"));
16112 inst.instruction &= ~0xf00;
16113 inst.instruction |= typebits << 8;
16116 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16117 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16118 otherwise. The variable arguments are a list of pairs of legal (size, align)
16119 values, terminated with -1. */
16122 neon_alignment_bit (int size, int align, int *do_align, ...)
16125 int result = FAIL, thissize, thisalign;
/* No alignment specified: nothing to validate or encode.  */
16127 if (!inst.operands[1].immisalign)
16133 va_start (ap, do_align);
/* Walk the (size, align) pairs until the -1 sentinel or a match.  */
16137 thissize = va_arg (ap, int);
16138 if (thissize == -1)
16140 thisalign = va_arg (ap, int);
16142 if (size == thissize && align == thisalign)
16145 while (result != SUCCESS);
16149 if (result == SUCCESS)
16152 first_error (_("unsupported alignment for instruction"));
/* Encode single-lane VLD<n>/VST<n> ("to/from one lane") forms:
   validates list length, lane index and stride, then applies the
   per-<n> legal alignment table and index_align encoding.  */
16158 do_neon_ld_st_lane (void)
16160 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16161 int align_good, do_align = 0;
16162 int logsize = neon_logbits (et.size);
16163 int align = inst.operands[1].imm >> 8;
/* <n> minus one, from bits [9:8] of the opcode template.  */
16164 int n = (inst.instruction >> 8) & 3;
16165 int max_el = 64 / et.size;
16167 if (et.type == NT_invtype)
16170 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
16171 _("bad list length"));
16172 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
16173 _("scalar index out of range"));
16174 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
16176 _("stride of 2 unavailable when element size is 8"));
16180 case 0: /* VLD1 / VST1. */
16181 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
16183 if (align_good == FAIL)
16187 unsigned alignbits = 0;
16190 case 16: alignbits = 0x1; break;
16191 case 32: alignbits = 0x3; break;
16194 inst.instruction |= alignbits << 4;
16198 case 1: /* VLD2 / VST2. */
16199 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
16201 if (align_good == FAIL)
16204 inst.instruction |= 1 << 4;
16207 case 2: /* VLD3 / VST3. */
16208 constraint (inst.operands[1].immisalign,
16209 _("can't use alignment with this instruction"));
16212 case 3: /* VLD4 / VST4. */
16213 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
16214 16, 64, 32, 64, 32, 128, -1);
16215 if (align_good == FAIL)
16219 unsigned alignbits = 0;
16222 case 8: alignbits = 0x1; break;
16223 case 16: alignbits = 0x1; break;
16224 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
16227 inst.instruction |= alignbits << 4;
16234 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16235 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16236 inst.instruction |= 1 << (4 + logsize);
16238 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
16239 inst.instruction |= logsize << 10;
16242 /* Encode single n-element structure to all lanes VLD<n> instructions. */
/* Per-<n> validation of list length, register stride and alignment,
   then size/stride/alignment bit placement.  */
16245 do_neon_ld_dup (void)
16247 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16248 int align_good, do_align = 0;
16250 if (et.type == NT_invtype)
16253 switch ((inst.instruction >> 8) & 3)
16255 case 0: /* VLD1. */
16256 gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
16257 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16258 &do_align, 16, 16, 32, 32, -1);
16259 if (align_good == FAIL)
16261 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
16264 case 2: inst.instruction |= 1 << 5; break;
16265 default: first_error (_("bad list length")); return;
16267 inst.instruction |= neon_logbits (et.size) << 6;
16270 case 1: /* VLD2. */
16271 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16272 &do_align, 8, 16, 16, 32, 32, 64, -1);
16273 if (align_good == FAIL)
16275 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
16276 _("bad list length"));
16277 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16278 inst.instruction |= 1 << 5;
16279 inst.instruction |= neon_logbits (et.size) << 6;
16282 case 2: /* VLD3. */
16283 constraint (inst.operands[1].immisalign,
16284 _("can't use alignment with this instruction"));
16285 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
16286 _("bad list length"));
16287 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16288 inst.instruction |= 1 << 5;
16289 inst.instruction |= neon_logbits (et.size) << 6;
16292 case 3: /* VLD4. */
16294 int align = inst.operands[1].imm >> 8;
16295 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
16296 16, 64, 32, 64, 32, 128, -1);
16297 if (align_good == FAIL)
16299 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
16300 _("bad list length"));
16301 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16302 inst.instruction |= 1 << 5;
/* 32-bit elements with :128 alignment use the special size 0b11.  */
16303 if (et.size == 32 && align == 128)
16304 inst.instruction |= 0x3 << 6;
16306 inst.instruction |= neon_logbits (et.size) << 6;
16313 inst.instruction |= do_align << 4;
16316 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16317 apart from bits [11:4]). */
16320 do_neon_ldx_stx (void)
16322 if (inst.operands[1].isreg)
16323 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
/* Dispatch on the lane specifier: interleave / all-lanes / one lane.  */
16325 switch (NEON_LANE (inst.operands[0].imm))
16327 case NEON_INTERLEAVE_LANES:
16328 NEON_ENCODE (INTERLV, inst);
16329 do_neon_ld_st_interleave ();
16332 case NEON_ALL_LANES:
16333 NEON_ENCODE (DUP, inst);
16334 if (inst.instruction == N_INV)
/* NOTE(review): unlike every sibling diagnostic here, this string is
   not wrapped in _() for translation — confirm and fix upstream.  */
16336 first_error ("only loads support such operands");
16343 NEON_ENCODE (LANE, inst);
16344 do_neon_ld_st_lane ();
16347 /* L bit comes from bit mask. */
16348 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16349 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16350 inst.instruction |= inst.operands[1].reg << 16;
16352 if (inst.operands[1].postind)
16354 int postreg = inst.operands[1].imm & 0xf;
16355 constraint (!inst.operands[1].immisreg,
16356 _("post-index must be a register"));
16357 constraint (postreg == 0xd || postreg == 0xf,
16358 _("bad register for post-index"));
16359 inst.instruction |= postreg;
/* Non-post-indexed: only a zero offset is representable; Rm is 0xd
   (writeback) or 0xf (no writeback) per the encoding convention.  */
16363 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16364 constraint (inst.reloc.exp.X_op != O_constant
16365 || inst.reloc.exp.X_add_number != 0,
16368 if (inst.operands[1].writeback)
16370 inst.instruction |= 0xd;
16373 inst.instruction |= 0xf;
/* Top byte differs between Thumb (0xf9) and ARM (0xf4) encodings.  */
16377 inst.instruction |= 0xf9000000;
16379 inst.instruction |= 0xf4000000;
/* Shared FP-ARMv8 (VSEL/VMAXNM-class) VFP encoder: single- vs.
   double-precision dyadic encode, D bit (8) for doubles, unconditional
   0xf prefix.  NOTE(review): the if/else and shape-test lines around
   these statements are missing from this extract.  */
16384 do_vfp_nsyn_fpv8 (enum neon_shape rs)
16386 NEON_ENCODE (FPV8, inst);
16389 do_vfp_sp_dyadic ();
16391 do_vfp_dp_rd_rn_rm ();
16394 inst.instruction |= 0x100;
16396 inst.instruction |= 0xf0000000;
/* NOTE(review): two handler bodies with missing headers — presumably
   do_vsel (VFP-only, error if the shape isn't VFP) and do_vmaxnm
   (VFP first, Neon fallback); confirm against the full file.  Both
   are unconditional (outside any IT block).  */
16402 set_it_insn_type (OUTSIDE_IT_INSN);
16404 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16405 first_error (_("invalid instruction shape"));
/* Second handler: VFP attempt, then the Neon misc-dyadic path.  */
16411 set_it_insn_type (OUTSIDE_IT_INSN);
16413 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16416 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16419 neon_dyadic_misc (NT_untyped, N_F32, 0);
/* Common encoder for the VRINT family.  Tries the VFP (scalar) form
   first — F32/F64 with per-mode opcode bits — then falls back to the
   Neon vector form with the rounding mode in bits [9:7].  */
16423 do_vrint_1 (enum neon_cvt_mode mode)
16425 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
16426 struct neon_type_el et;
16431 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
16432 if (et.type != NT_invtype)
16434 /* VFP encodings. */
/* The a/n/p/m modes have no condition field — forbid IT blocks.  */
16435 if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
16436 || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
16437 set_it_insn_type (OUTSIDE_IT_INSN);
16439 NEON_ENCODE (FPV8, inst);
16441 do_vfp_sp_monadic ();
16443 do_vfp_dp_rd_rm ();
16447 case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
16448 case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
16449 case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
16450 case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
16451 case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
16452 case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
16453 case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
/* Bit 8 distinguishes double (DD) from single precision.  */
16457 inst.instruction |= (rs == NS_DD) << 8;
16458 do_vfp_cond_or_thumb ();
16462 /* Neon encodings (or something broken...). */
16464 et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);
16466 if (et.type == NT_invtype)
16469 set_it_insn_type (OUTSIDE_IT_INSN);
16470 NEON_ENCODE (FLOAT, inst);
16472 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16475 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16476 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16477 inst.instruction |= LOW4 (inst.operands[1].reg);
16478 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16479 inst.instruction |= neon_quad (rs) << 6;
16482 case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
16483 case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
16484 case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
16485 case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
16486 case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
16487 case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
16488 case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
/* 0xfc prefix in Thumb, 0xf0 in ARM (per the sampled branches).  */
16493 inst.instruction |= 0xfc000000;
16495 inst.instruction |= 0xf0000000;
/* NOTE(review): bodies of the seven VRINT<mode> handlers (vrintx,
   vrintz, vrintr, vrinta, vrintn, vrintp, vrintm by the mode passed);
   their `static void do_vrint?` headers are missing from this
   extract.  Each just forwards its rounding mode to do_vrint_1.  */
16502 do_vrint_1 (neon_cvt_mode_x);
16508 do_vrint_1 (neon_cvt_mode_z);
16514 do_vrint_1 (neon_cvt_mode_r);
16520 do_vrint_1 (neon_cvt_mode_a);
16526 do_vrint_1 (neon_cvt_mode_n);
16532 do_vrint_1 (neon_cvt_mode_p);
16538 do_vrint_1 (neon_cvt_mode_m);
16541 /* Crypto v1 instructions. */
16543 do_crypto_2op_1 (unsigned elttype, int op)
16545 set_it_insn_type (OUTSIDE_IT_INSN);
16547 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
16553 NEON_ENCODE (INTEGER, inst);
16554 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16555 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16556 inst.instruction |= LOW4 (inst.operands[1].reg);
16557 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16559 inst.instruction |= op << 6;
16562 inst.instruction |= 0xfc000000;
16564 inst.instruction |= 0xf0000000;
/* Common encoder for the three-register crypto instructions
   (SHA1C/SHA1P/SHA1M/SHA1SU0 and SHA256H/SHA256H2/SHA256SU1).
   U and OP select the instruction within the group; all of them
   require 32-bit unsigned-typed Q-register operands.  */
16568 do_crypto_3op_1 (int u, int op)
/* Not permitted inside an IT block.  */
16570 set_it_insn_type (OUTSIDE_IT_INSN);
16572 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
16573 N_32 | N_UNT | N_KEY).type == NT_invtype)
16578 NEON_ENCODE (INTEGER, inst);
/* Reuse the generic three-same encoder; size field carries 8 << op.  */
16579 neon_three_same (1, u, 8 << op);
/* Per-mnemonic wrapper bodies; the signatures are elided from this
   listing, but the argument pairs identify them: the N_8 2-op calls are
   the AES group (aese/aesd/aesmc/aesimc), the (u, op) 3-op calls the
   SHA1/SHA256 three-register group, and the N_32 2-op calls are
   sha1h (-1), sha1su1 (0) and sha256su0 (1).  TODO confirm the exact
   mnemonic-to-call mapping against a complete copy of the file.  */
16585 do_crypto_2op_1 (N_8, 0);
16591 do_crypto_2op_1 (N_8, 1);
16597 do_crypto_2op_1 (N_8, 2);
16603 do_crypto_2op_1 (N_8, 3);
16609 do_crypto_3op_1 (0, 0);
16615 do_crypto_3op_1 (0, 1);
16621 do_crypto_3op_1 (0, 2);
16627 do_crypto_3op_1 (0, 3);
16633 do_crypto_3op_1 (1, 0);
16639 do_crypto_3op_1 (1, 1);
16643 do_sha256su1 (void)
16645 do_crypto_3op_1 (1, 2);
16651 do_crypto_2op_1 (N_32, -1);
16657 do_crypto_2op_1 (N_32, 0);
16661 do_sha256su0 (void)
16663 do_crypto_2op_1 (N_32, 1);
/* Common encoder for the CRC32 family.  POLY selects the polynomial
   variant and SZ the operand size; both land in different bit positions
   for the Thumb and ARM encodings (see the thumb_mode ternaries).  */
16667 do_crc32_1 (unsigned int poly, unsigned int sz)
16669 unsigned int Rd = inst.operands[0].reg;
16670 unsigned int Rn = inst.operands[1].reg;
16671 unsigned int Rm = inst.operands[2].reg;
/* CRC32 is not permitted inside an IT block.  */
16673 set_it_insn_type (OUTSIDE_IT_INSN);
16674 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
16675 inst.instruction |= LOW4 (Rn) << 16;
16676 inst.instruction |= LOW4 (Rm);
16677 inst.instruction |= sz << (thumb_mode ? 4 : 21);
16678 inst.instruction |= poly << (thumb_mode ? 20 : 9);
/* Using pc — and, in Thumb state, sp — is UNPREDICTABLE; warn but
   still assemble.  */
16680 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
16681 as_warn (UNPRED_REG ("r15"));
16682 if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
16683 as_warn (UNPRED_REG ("r13"));
16723 /* Overall per-instruction processing. */
16725 /* We need to be able to fix up arbitrary expressions in some statements.
16726 This is so that we can handle symbols that are an arbitrary distance from
16727 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16728 which returns part of an address in a form which will be valid for
16729 a data instruction. We do this by pushing the expression into a symbol
16730 in the expr_section, and creating a fix for that. */
/* NOTE(review): the remaining parameters (where, size, exp, pc_rel,
   reloc) are elided from this listing but are visible in the calls
   below.  */
16733 fix_new_arm (fragS * frag,
/* This branch handles a constant expression (presumably X_op ==
   O_constant — the switch/if is elided): turn it into an *ABS* symbol
   so the fix has something to refer to.  */
16747 /* Create an absolute valued symbol, so we have something to
16748 refer to in the object file. Unfortunately for us, gas's
16749 generic expression parsing will already have folded out
16750 any use of .set foo/.type foo %function that may have
16751 been used to set type information of the target location,
16752 that's being specified symbolically. We have to presume
16753 the user knows what they are doing. */
16757 sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
16759 symbol = symbol_find_or_make (name);
16760 S_SET_SEGMENT (symbol, absolute_section);
16761 symbol_set_frag (symbol, &zero_address_frag);
16762 S_SET_VALUE (symbol, exp->X_add_number);
/* Rewrite the expression to refer to the new symbol.  */
16763 exp->X_op = O_symbol;
16764 exp->X_add_symbol = symbol;
16765 exp->X_add_number = 0;
/* Two creation paths: simple expressions go straight to fix_new_exp;
   anything else is wrapped in an expression symbol first (the selecting
   switch cases are elided here).  */
16771 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
16772 (enum bfd_reloc_code_real) reloc);
16776 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
16777 pc_rel, (enum bfd_reloc_code_real) reloc);
16781 /* Mark whether the fix is to a THUMB instruction, or an ARM
16783 new_fix->tc_fix_data = thumb_mode;
16786 /* Create a frag for an instruction requiring relaxation. */
16788 output_relax_insn (void)
16794 /* The size of the instruction is unknown, so tie the debug info to the
16795 start of the instruction. */
16796 dwarf2_emit_insn (0);
/* Pick the relaxation symbol/offset from the reloc expression; the
   O_symbol and O_constant cases are direct, anything else gets an
   expression symbol (case labels elided from this listing).  */
16798 switch (inst.reloc.exp.X_op)
16801 sym = inst.reloc.exp.X_add_symbol;
16802 offset = inst.reloc.exp.X_add_number;
16806 offset = inst.reloc.exp.X_add_number;
16809 sym = make_expr_symbol (&inst.reloc.exp);
/* Allocate a variable-size machine-dependent frag, then emit the
   initial (narrow) form of the instruction.  */
16813 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
16814 inst.relax, sym, offset, NULL/*offset, opcode*/);
16815 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
16818 /* Write a 32-bit thumb instruction to buf. */
16820 put_thumb32_insn (char * buf, unsigned long insn)
/* A 32-bit Thumb instruction is stored as two halfwords, high half
   first, each in the target byte order.  */
16822 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
16823 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
/* Emit the assembled instruction in inst to the current frag, creating
   a fixup for its relocation (if any) and the DWARF line info.  STR is
   the source text, used only in diagnostics.  */
16827 output_inst (const char * str)
16833 as_bad ("%s -- `%s'", inst.error, str);
/* Relaxable instructions are handled separately (see
   output_relax_insn); the guarding test is elided from this listing.  */
16838 output_relax_insn ();
16841 if (inst.size == 0)
16844 to = frag_more (inst.size);
16845 /* PR 9814: Record the thumb mode into the current frag so that we know
16846 what type of NOP padding to use, if necessary. We override any previous
16847 setting so that if the mode has changed then the NOPS that we use will
16848 match the encoding of the last instruction in the frag. */
16849 frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
/* Three layouts: 32-bit Thumb (two halfwords, high first), 64-bit ARM
   pair (two identical words — used by e.g. neon_dyadic wide forms),
   or a single write of inst.size bytes.  */
16851 if (thumb_mode && (inst.size > THUMB_SIZE))
16853 gas_assert (inst.size == (2 * THUMB_SIZE));
16854 put_thumb32_insn (to, inst.instruction);
16856 else if (inst.size > INSN_SIZE)
16858 gas_assert (inst.size == (2 * INSN_SIZE));
16859 md_number_to_chars (to, inst.instruction, INSN_SIZE);
16860 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
16863 md_number_to_chars (to, inst.instruction, inst.size);
16865 if (inst.reloc.type != BFD_RELOC_UNUSED)
16866 fix_new_arm (frag_now, to - frag_now->fr_literal,
16867 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
16870 dwarf2_emit_insn (inst.size);
/* Emit (or re-emit) a Thumb IT instruction with condition COND and
   mask MASK.  When TO is NULL, fresh space is allocated in the current
   frag; otherwise the bytes at TO are overwritten in place, which is
   how now_it_add_mask () patches an already-emitted IT instruction.
   Presumably returns the write location (return stmt elided here).  */
16874 output_it_inst (int cond, int mask, char * to)
/* 0xbf00 is the IT opcode skeleton; cond goes in bits 7:4, mask in
   bits 3:0.  */
16876 unsigned long instruction = 0xbf00;
16879 instruction |= mask;
16880 instruction |= cond << 4;
16884 to = frag_more (2);
16886 dwarf2_emit_insn (2);
16890 md_number_to_chars (to, instruction, 2);
16895 /* Tag values used in struct asm_opcode's tag field. */
/* NOTE(review): the enum header/brace lines are elided from this
   listing.  The tags classify how each mnemonic accepts its condition
   affix; opcode_lookup () and md_assemble () switch on them.  */
16898 OT_unconditional, /* Instruction cannot be conditionalized.
16899 The ARM condition field is still 0xE. */
16900 OT_unconditionalF, /* Instruction cannot be conditionalized
16901 and carries 0xF in its ARM condition field. */
16902 OT_csuffix, /* Instruction takes a conditional suffix. */
16903 OT_csuffixF, /* Some forms of the instruction take a conditional
16904 suffix, others place 0xF where the condition field
16906 OT_cinfix3, /* Instruction takes a conditional infix,
16907 beginning at character index 3. (In
16908 unified mode, it becomes a suffix.) */
16909 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
16910 tsts, cmps, cmns, and teqs. */
16911 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
16912 character index 3, even in unified mode. Used for
16913 legacy instructions where suffix and infix forms
16914 may be ambiguous. */
16915 OT_csuf_or_in3, /* Instruction takes either a conditional
16916 suffix or an infix at character index 3. */
16917 OT_odd_infix_unc, /* This is the unconditional variant of an
16918 instruction that takes a conditional infix
16919 at an unusual position. In unified mode,
16920 this variant will accept a suffix. */
16921 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
16922 are the conditional variants of instructions that
16923 take conditional infixes in unusual positions.
16924 The infix appears at character index
16925 (tag - OT_odd_infix_0). These are not accepted
16926 in unified mode. */
16929 /* Subroutine of md_assemble, responsible for looking up the primary
16930 opcode from the mnemonic the user wrote. STR points to the
16931 beginning of the mnemonic.
16933 This is not simply a hash table lookup, because of conditional
16934 variants. Most instructions have conditional variants, which are
16935 expressed with a _conditional affix_ to the mnemonic. If we were
16936 to encode each conditional variant as a literal string in the opcode
16937 table, it would have approximately 20,000 entries.
16939 Most mnemonics take this affix as a suffix, and in unified syntax,
16940 'most' is upgraded to 'all'. However, in the divided syntax, some
16941 instructions take the affix as an infix, notably the s-variants of
16942 the arithmetic instructions. Of those instructions, all but six
16943 have the infix appear after the third character of the mnemonic.
16945 Accordingly, the algorithm for looking up primary opcodes given
16948 1. Look up the identifier in the opcode table.
16949 If we find a match, go to step U.
16951 2. Look up the last two characters of the identifier in the
16952 conditions table. If we find a match, look up the first N-2
16953 characters of the identifier in the opcode table. If we
16954 find a match, go to step CE.
16956 3. Look up the fourth and fifth characters of the identifier in
16957 the conditions table. If we find a match, extract those
16958 characters from the identifier, and look up the remaining
16959 characters in the opcode table. If we find a match, go
16964 U. Examine the tag field of the opcode structure, in case this is
16965 one of the six instructions with its conditional infix in an
16966 unusual place. If it is, the tag tells us where to find the
16967 infix; look it up in the conditions table and set inst.cond
16968 accordingly. Otherwise, this is an unconditional instruction.
16969 Again set inst.cond accordingly. Return the opcode structure.
16971 CE. Examine the tag field to make sure this is an instruction that
16972 should receive a conditional suffix. If it is not, fail.
16973 Otherwise, set inst.cond from the suffix we already looked up,
16974 and return the opcode structure.
16976 CM. Examine the tag field to make sure this is an instruction that
16977 should receive a conditional infix after the third character.
16978 If it is not, fail. Otherwise, undo the edits to the current
16979 line of input and proceed as for case CE. */
16981 static const struct asm_opcode *
/* Look up the primary opcode for the mnemonic at *STR, handling the
   conditional suffix/infix variants described in the big comment above.
   On success *STR is advanced past the mnemonic (and any width/Neon
   type suffix) and inst.cond is set.  */
16982 opcode_lookup (char **str)
16986 const struct asm_opcode *opcode;
16987 const struct asm_cond *cond;
16990 /* Scan up to the end of the mnemonic, which must end in white space,
16991 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
16992 for (base = end = *str; *end != '\0'; end++)
16993 if (*end == ' ' || *end == '.')
16999 /* Handle a possible width suffix and/or Neon type suffix. */
17004 /* The .w and .n suffixes are only valid if the unified syntax is in
/* .w forces a wide (32-bit) encoding, .n a narrow (16-bit) one; the
   inst.size_req assignments are elided from this listing.  */
17006 if (unified_syntax && end[1] == 'w')
17008 else if (unified_syntax && end[1] == 'n')
17013 inst.vectype.elems = 0;
17015 *str = end + offset;
17017 if (end[offset] == '.')
17019 /* See if we have a Neon type suffix (possible in either unified or
17020 non-unified ARM syntax mode). */
17021 if (parse_neon_type (&inst.vectype, str) == FAIL)
17024 else if (end[offset] != '\0' && end[offset] != ' ')
/* Step 1 of the algorithm described above: exact-match lookup.  */
17030 /* Look for unaffixed or special-case affixed mnemonic. */
17031 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17036 if (opcode->tag < OT_odd_infix_0)
17038 inst.cond = COND_ALWAYS;
/* Odd-position infix variant: the tag encodes where the infix sits.  */
17042 if (warn_on_deprecated && unified_syntax)
17043 as_warn (_("conditional infixes are deprecated in unified syntax"));
17044 affix = base + (opcode->tag - OT_odd_infix_0);
17045 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17048 inst.cond = cond->value;
/* Step 2: try stripping a two-character conditional suffix.  */
17052 /* Cannot have a conditional suffix on a mnemonic of less than two
17054 if (end - base < 3)
17057 /* Look for suffixed mnemonic. */
17059 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17060 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17062 if (opcode && cond)
/* Step CE: check the tag allows a suffix before accepting it.  */
17065 switch (opcode->tag)
17067 case OT_cinfix3_legacy:
17068 /* Ignore conditional suffixes matched on infix only mnemonics. */
17072 case OT_cinfix3_deprecated:
17073 case OT_odd_infix_unc:
17074 if (!unified_syntax)
17076 /* else fall through */
17080 case OT_csuf_or_in3:
17081 inst.cond = cond->value;
17084 case OT_unconditional:
17085 case OT_unconditionalF:
17087 inst.cond = cond->value;
17090 /* Delayed diagnostic. */
17091 inst.error = BAD_COND;
17092 inst.cond = COND_ALWAYS;
/* Step 3 / CM: try a conditional infix after the third character.  */
17101 /* Cannot have a usual-position infix on a mnemonic of less than
17102 six characters (five would be a suffix). */
17103 if (end - base < 6)
17106 /* Look for infixed mnemonic in the usual position. */
17108 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
/* Temporarily splice the two infix characters out of the buffer for
   the lookup, then restore the original text.  */
17112 memcpy (save, affix, 2);
17113 memmove (affix, affix + 2, (end - affix) - 2);
17114 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17116 memmove (affix + 2, affix, (end - affix) - 2);
17117 memcpy (affix, save, 2);
17120 && (opcode->tag == OT_cinfix3
17121 || opcode->tag == OT_cinfix3_deprecated
17122 || opcode->tag == OT_csuf_or_in3
17123 || opcode->tag == OT_cinfix3_legacy))
17126 if (warn_on_deprecated && unified_syntax
17127 && (opcode->tag == OT_cinfix3
17128 || opcode->tag == OT_cinfix3_deprecated))
17129 as_warn (_("conditional infixes are deprecated in unified syntax"));
17131 inst.cond = cond->value;
17138 /* This function generates an initial IT instruction, leaving its block
17139 virtually open for the new instructions. Eventually,
17140 the mask will be updated by now_it_add_mask () each time
17141 a new instruction needs to be included in the IT block.
17142 Finally, the block is closed with close_automatic_it_block ().
17143 The block closure can be requested either from md_assemble (),
17144 a tencode (), or due to a label hook. */
17147 new_automatic_it_block (int cond)
17149 now_it.state = AUTOMATIC_IT_BLOCK;
/* 0x18 is the one-instruction IT mask; the remembered write position
   (now_it.insn) lets now_it_add_mask () patch it in place later.  */
17150 now_it.mask = 0x18;
17152 now_it.block_length = 1;
17153 mapping_state (MAP_THUMB);
17154 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
17155 now_it.warn_deprecated = FALSE;
17156 now_it.insn_cond = TRUE;
17159 /* Close an automatic IT block.
17160 See comments in new_automatic_it_block (). */
17163 close_automatic_it_block (void)
/* 0x10 marks "no further slots"; see is_last tests elsewhere.  */
17165 now_it.mask = 0x10;
17166 now_it.block_length = 0;
17169 /* Update the mask of the current automatically-generated IT
17170 instruction. See comments in new_automatic_it_block (). */
17173 now_it_add_mask (int cond)
17175 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17176 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17177 | ((bitvalue) << (nbit)))
/* Low bit of the condition decides then (T) vs else (E) for this slot.  */
17178 const int resulting_bit = (cond & 1);
17180 now_it.mask &= 0xf;
/* Set this slot's T/E bit and the new terminating bit below it.  */
17181 now_it.mask = SET_BIT_VALUE (now_it.mask,
17183 (5 - now_it.block_length));
17184 now_it.mask = SET_BIT_VALUE (now_it.mask,
17186 ((5 - now_it.block_length) - 1) );
/* Rewrite the IT instruction in place at the remembered location.  */
17187 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
17190 #undef SET_BIT_VALUE
17193 /* The IT blocks handling machinery is accessed through the these functions:
17194 it_fsm_pre_encode () from md_assemble ()
17195 set_it_insn_type () optional, from the tencode functions
17196 set_it_insn_type_last () ditto
17197 in_it_block () ditto
17198 it_fsm_post_encode () from md_assemble ()
17199 force_automatic_it_block_close () from label handling functions
17202 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17203 initializing the IT insn type with a generic initial value depending
17204 on the inst.condition.
17205 2) During the tencode function, two things may happen:
17206 a) The tencode function overrides the IT insn type by
17207 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17208 b) The tencode function queries the IT block state by
17209 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17211 Both set_it_insn_type and in_it_block run the internal FSM state
17212 handling function (handle_it_state), because: a) setting the IT insn
17213 type may incur in an invalid state (exiting the function),
17214 and b) querying the state requires the FSM to be updated.
17215 Specifically we want to avoid creating an IT block for conditional
17216 branches, so it_fsm_pre_encode is actually a guess and we can't
17217 determine whether an IT block is required until the tencode () routine
17218 has decided what type of instruction this actually is.
17219 Because of this, if set_it_insn_type and in_it_block have to be used,
17220 set_it_insn_type has to be called first.
17222 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17223 determines the insn IT type depending on the inst.cond code.
17224 When a tencode () routine encodes an instruction that can be
17225 either outside an IT block, or, in the case of being inside, has to be
17226 the last one, set_it_insn_type_last () will determine the proper
17227 IT instruction type based on the inst.cond code. Otherwise,
17228 set_it_insn_type can be called for overriding that logic or
17229 for covering other cases.
17231 Calling handle_it_state () may not transition the IT block state to
17232 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17233 still queried. Instead, if the FSM determines that the state should
17234 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17235 after the tencode () function: that's what it_fsm_post_encode () does.
17237 Since in_it_block () calls the state handling function to get an
17238 updated state, an error may occur (due to invalid insns combination).
17239 In that case, inst.error is set.
17240 Therefore, inst.error has to be checked after the execution of
17241 the tencode () routine.
17243 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17244 any pending state change (if any) that didn't take place in
17245 handle_it_state () as explained above. */
/* Initialize inst.it_insn_type before calling the encode routine; a
   conditional instruction is assumed to want to be inside an IT block
   unless the tencode/aencode function overrides this guess.  */
17248 it_fsm_pre_encode (void)
17250 if (inst.cond != COND_ALWAYS)
17251 inst.it_insn_type = INSIDE_IT_INSN;
17253 inst.it_insn_type = OUTSIDE_IT_INSN;
17255 now_it.state_handled = 0;
17258 /* IT state FSM handling function. */
/* Run one step of the IT-block state machine for the instruction being
   assembled.  Sets inst.error on invalid combinations.  See the large
   comment above for the overall protocol.  */
17261 handle_it_state (void)
17263 now_it.state_handled = 1;
17264 now_it.insn_cond = FALSE;
17266 switch (now_it.state)
17268 case OUTSIDE_IT_BLOCK:
17269 switch (inst.it_insn_type)
17271 case OUTSIDE_IT_INSN:
17274 case INSIDE_IT_INSN:
17275 case INSIDE_IT_LAST_INSN:
/* ARM state: conditional execution needs no IT; optionally nag.  */
17276 if (thumb_mode == 0)
17279 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
17280 as_tsktsk (_("Warning: conditional outside an IT block"\
/* Thumb state: synthesize an IT block when implicit-IT is enabled
   and the core has Thumb-2; otherwise it is an error.  */
17285 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
17286 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
17288 /* Automatically generate the IT instruction. */
17289 new_automatic_it_block (inst.cond);
17290 if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
17291 close_automatic_it_block ();
17295 inst.error = BAD_OUT_IT;
17301 case IF_INSIDE_IT_LAST_INSN:
17302 case NEUTRAL_IT_INSN:
/* This is presumably the IT_INSN case (label elided): a user IT
   starts a manual block.  */
17306 now_it.state = MANUAL_IT_BLOCK;
17307 now_it.block_length = 0;
17312 case AUTOMATIC_IT_BLOCK:
17313 /* Three things may happen now:
17314 a) We should increment current it block size;
17315 b) We should close current it block (closing insn or 4 insns);
17316 c) We should close current it block and start a new one (due
17317 to incompatible conditions or
17318 4 insns-length block reached). */
17320 switch (inst.it_insn_type)
17322 case OUTSIDE_IT_INSN:
17323 /* The closure of the block shall happen immediately,
17324 so any in_it_block () call reports the block as closed. */
17325 force_automatic_it_block_close ();
17328 case INSIDE_IT_INSN:
17329 case INSIDE_IT_LAST_INSN:
17330 case IF_INSIDE_IT_LAST_INSN:
17331 now_it.block_length++;
/* Block full (4 insns max) or condition incompatible: restart.  */
17333 if (now_it.block_length > 4
17334 || !now_it_compatible (inst.cond))
17336 force_automatic_it_block_close ();
17337 if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
17338 new_automatic_it_block (inst.cond);
17342 now_it.insn_cond = TRUE;
17343 now_it_add_mask (inst.cond);
17346 if (now_it.state == AUTOMATIC_IT_BLOCK
17347 && (inst.it_insn_type == INSIDE_IT_LAST_INSN
17348 || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
17349 close_automatic_it_block ();
17352 case NEUTRAL_IT_INSN:
17353 now_it.block_length++;
17354 now_it.insn_cond = TRUE;
17356 if (now_it.block_length > 4)
17357 force_automatic_it_block_close ();
/* Neutral insns take the block's own condition (low bit of cc).  */
17359 now_it_add_mask (now_it.cc & 1);
/* Presumably the IT_INSN case (label elided): an explicit IT ends
   the automatic block and opens a manual one.  */
17363 close_automatic_it_block ();
17364 now_it.state = MANUAL_IT_BLOCK;
17369 case MANUAL_IT_BLOCK:
17371 /* Check conditional suffixes. */
/* Derive the condition this slot requires from cc and the current
   T/E bit of the mask.  */
17372 const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
17375 now_it.mask &= 0x1f;
17376 is_last = (now_it.mask == 0x10);
17377 now_it.insn_cond = TRUE;
17379 switch (inst.it_insn_type)
17381 case OUTSIDE_IT_INSN:
17382 inst.error = BAD_NOT_IT;
17385 case INSIDE_IT_INSN:
17386 if (cond != inst.cond)
17388 inst.error = BAD_IT_COND;
17393 case INSIDE_IT_LAST_INSN:
17394 case IF_INSIDE_IT_LAST_INSN:
17395 if (cond != inst.cond)
17397 inst.error = BAD_IT_COND;
/* Branch-like insns must be the final slot of the block.  */
17402 inst.error = BAD_BRANCH;
17407 case NEUTRAL_IT_INSN:
17408 /* The BKPT instruction is unconditional even in an IT block. */
/* An IT inside an IT block is invalid.  */
17412 inst.error = BAD_IT_IT;
/* Pattern/mask pair describing a class of 16-bit Thumb encodings whose
   use inside an IT block is deprecated in ARMv8; see
   it_fsm_post_encode () for the matching loop.  */
17422 struct depr_insn_mask
17424 unsigned long pattern;
17425 unsigned long mask;
17426 const char* description;
17429 /* List of 16-bit instruction patterns deprecated in an IT block in
17431 static const struct depr_insn_mask depr_it_insns[] = {
17432 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17433 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17434 { 0xa000, 0xb800, N_("ADR") },
17435 { 0x4800, 0xf800, N_("Literal loads") },
17436 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17437 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17438 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
17439 field in asm_opcode. 'tvalue' is used at the stage this check happens. */
17440 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
/* Commit any pending IT-state transition after the encode routine has
   run, and issue the ARMv8 IT-block deprecation warnings.  */
17445 it_fsm_post_encode (void)
17449 if (!now_it.state_handled)
17450 handle_it_state ();
/* ARMv8 deprecates several things inside IT blocks; warn once per
   block (warn_deprecated latches).  */
17452 if (now_it.insn_cond
17453 && !now_it.warn_deprecated
17454 && warn_on_deprecated
17455 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
17457 if (inst.instruction >= 0x10000)
17459 as_warn (_("IT blocks containing 32-bit Thumb instructions are "
17460 "deprecated in ARMv8"));
17461 now_it.warn_deprecated = TRUE;
/* 16-bit insn: scan the deprecated-pattern table.  */
17465 const struct depr_insn_mask *p = depr_it_insns;
17467 while (p->mask != 0)
17469 if ((inst.instruction & p->mask) == p->pattern)
17471 as_warn (_("IT blocks containing 16-bit Thumb instructions "
17472 "of the following class are deprecated in ARMv8: "
17473 "%s"), p->description);
17474 now_it.warn_deprecated = TRUE;
17482 if (now_it.block_length > 1)
17484 as_warn (_("IT blocks containing more than one conditional "
17485 "instruction are deprecated in ARMv8"));
17486 now_it.warn_deprecated = TRUE;
/* Leave the block when its final slot has been consumed.  */
17490 is_last = (now_it.mask == 0x10);
17493 now_it.state = OUTSIDE_IT_BLOCK;
/* Force-close any open automatic IT block (no-op for manual blocks).  */
17499 force_automatic_it_block_close (void)
17501 if (now_it.state == AUTOMATIC_IT_BLOCK)
17503 close_automatic_it_block ();
17504 now_it.state = OUTSIDE_IT_BLOCK;
/* The lines below belong to in_it_block () — its signature is elided
   from this listing.  It runs the FSM if needed, then reports whether
   we are currently inside any IT block.  */
17512 if (!now_it.state_handled)
17513 handle_it_state ();
17515 return now_it.state != OUTSIDE_IT_BLOCK;
/* Main entry point: assemble one source line STR.  Looks up the opcode,
   dispatches to the Thumb or ARM encoder, and emits the result.  */
17519 md_assemble (char *str)
17522 const struct asm_opcode * opcode;
17524 /* Align the previous label if needed. */
17525 if (last_label_seen != NULL)
17527 symbol_set_frag (last_label_seen, frag_now);
17528 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
17529 S_SET_SEGMENT (last_label_seen, now_seg);
17532 memset (&inst, '\0', sizeof (inst));
17533 inst.reloc.type = BFD_RELOC_UNUSED;
17535 opcode = opcode_lookup (&p);
17538 /* It wasn't an instruction, but it might be a register alias of
17539 the form alias .req reg, or a Neon .dn/.qn directive. */
17540 if (! create_register_alias (str, p)
17541 && ! create_neon_reg_alias (str, p))
17542 as_bad (_("bad instruction `%s'"), str);
17547 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
17548 as_warn (_("s suffix on comparison instruction is deprecated"));
17550 /* The value which unconditional instructions should have in place of the
17551 condition field. */
17552 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
/* Thumb branch (the guarding thumb_mode test is elided here).  */
17556 arm_feature_set variant;
17558 variant = cpu_variant;
17559 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17560 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
17561 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
17562 /* Check that this instruction is supported for this CPU. */
17563 if (!opcode->tvariant
17564 || (thumb_mode == 1
17565 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
17567 as_bad (_("selected processor does not support Thumb mode `%s'"), str);
/* Pre-UAL Thumb only allows conditions on branches.  */
17570 if (inst.cond != COND_ALWAYS && !unified_syntax
17571 && opcode->tencode != do_t_branch)
17573 as_bad (_("Thumb does not support conditional execution"));
17577 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
17579 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
17580 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
17581 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
17583 /* Two things are addressed here.
17584 1) Implicit require narrow instructions on Thumb-1.
17585 This avoids relaxation accidentally introducing Thumb-2
17587 2) Reject wide instructions in non Thumb-2 cores. */
17588 if (inst.size_req == 0)
17590 else if (inst.size_req == 4)
17592 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
17598 inst.instruction = opcode->tvalue;
17600 if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
17602 /* Prepare the it_insn_type for those encodings that don't set
17604 it_fsm_pre_encode ();
17606 opcode->tencode ();
17608 it_fsm_post_encode ();
17611 if (!(inst.error || inst.relax))
/* 0xe800..0xffff would be the first half of a 32-bit encoding —
   a complete instruction must not land in that range.  */
17613 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
17614 inst.size = (inst.instruction > 0xffff ? 4 : 2);
17615 if (inst.size_req && inst.size_req != inst.size)
17617 as_bad (_("cannot honor width suffix -- `%s'"), str);
17622 /* Something has gone badly wrong if we try to relax a fixed size
17624 gas_assert (inst.size_req == 0 || !inst.relax);
17626 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17627 *opcode->tvariant);
17628 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17629 set those bits when Thumb-2 32-bit instructions are seen. ie.
17630 anything other than bl/blx and v6-M instructions.
17631 This is overly pessimistic for relaxable instructions. */
17632 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
17634 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17635 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
17636 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17639 check_neon_suffixes;
17643 mapping_state (MAP_THUMB);
/* ARM-state branch.  */
17646 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
17650 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
17651 is_bx = (opcode->aencode == do_bx);
17653 /* Check that this instruction is supported for this CPU. */
17654 if (!(is_bx && fix_v4bx)
17655 && !(opcode->avariant &&
17656 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
17658 as_bad (_("selected processor does not support ARM mode `%s'"), str);
17663 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
17667 inst.instruction = opcode->avalue;
17668 if (opcode->tag == OT_unconditionalF)
17669 inst.instruction |= 0xF << 28;
17671 inst.instruction |= inst.cond << 28;
17672 inst.size = INSN_SIZE;
17673 if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
17675 it_fsm_pre_encode ();
17676 opcode->aencode ();
17677 it_fsm_post_encode ();
17679 /* Arm mode bx is marked as both v4T and v5 because it's still required
17680 on a hypothetical non-thumb v5 core. */
17682 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
17684 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
17685 *opcode->avariant);
17687 check_neon_suffixes;
17691 mapping_state (MAP_ARM);
/* Neither Thumb nor ARM capable: diagnose.  */
17696 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
/* End-of-assembly check: warn about any manual IT block left open in
   any section or at end of file.  */
17704 check_it_blocks_finished (void)
17709 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
17710 if (seg_info (sect)->tc_segment_info_data.current_it.state
17711 == MANUAL_IT_BLOCK)
17713 as_warn (_("section '%s' finished with an open IT block."),
17717 if (now_it.state == MANUAL_IT_BLOCK)
17718 as_warn (_("file finished with an open IT block."));
17722 /* Various frobbings of labels and their addresses. */
/* Per-line hook: forget any label seen on the previous line.  */
17725 arm_start_line_hook (void)
17727 last_label_seen = NULL;
/* Label hook: record the label, tag it with the current Thumb/interwork
   state, close any automatic IT block, and handle .thumb_func marking.  */
17731 arm_frob_label (symbolS * sym)
17733 last_label_seen = sym;
17735 ARM_SET_THUMB (sym, thumb_mode);
17737 #if defined OBJ_COFF || defined OBJ_ELF
17738 ARM_SET_INTERWORK (sym, support_interwork);
/* A label must not land in the middle of an IT block.  */
17741 force_automatic_it_block_close ();
17743 /* Note - do not allow local symbols (.Lxxx) to be labelled
17744 as Thumb functions. This is because these labels, whilst
17745 they exist inside Thumb code, are not the entry points for
17746 possible ARM->Thumb calls. Also, these labels can be used
17747 as part of a computed goto or switch statement. eg gcc
17748 can generate code that looks like this:
17750 ldr r2, [pc, .Laaa]
17760 The first instruction loads the address of the jump table.
17761 The second instruction converts a table index into a byte offset.
17762 The third instruction gets the jump address out of the table.
17763 The fourth instruction performs the jump.
17765 If the address stored at .Laaa is that of a symbol which has the
17766 Thumb_Func bit set, then the linker will arrange for this address
17767 to have the bottom bit set, which in turn would mean that the
17768 address computation performed by the third instruction would end
17769 up with the bottom bit set. Since the ARM is capable of unaligned
17770 word loads, the instruction would then load the incorrect address
17771 out of the jump table, and chaos would ensue. */
17772 if (label_is_thumb_function_name
17773 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
17774 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
17776 /* When the address of a Thumb function is taken the bottom
17777 bit of that address should be set. This will allow
17778 interworking between Arm and Thumb functions to work
17781 THUMB_SET_FUNC (sym, 1);
17783 label_is_thumb_function_name = FALSE;
17786 dwarf2_emit_label (sym);
/* Recognize the "/data:" marker used to flag data-in-code in Thumb
   symbol names, consuming it from the input line.  The return
   statements are elided from this listing — presumably nonzero is
   returned when the marker was found; TODO confirm.  */
17790 arm_data_in_code (void)
17792 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
17794 *input_line_pointer = '/';
17795 input_line_pointer += 5;
17796 *input_line_pointer = 0;
/* Strip the "/data" suffix (attached by the data-in-code handling
   above) from a Thumb symbol name, truncating NAME in place.  */
17804 arm_canonicalize_symbol_name (char * name)
17808 if (thumb_mode && (len = strlen (name)) > 5
17809 && streq (name + len - 5, "/data"))
17810 *(name + len - 5) = 0;
17815 /* Table of all register names defined by default. The user can
17816 define additional names with .req. Note that all register names
17817 should appear in both upper and lowercase variants. Some registers
17818 also have mixed-case names. */
/* REGDEF builds one reg_entry initializer; REGNUM/REGNUM2 derive the
   name from prefix+number (REGNUM2 doubling the number, used for
   Q registers aliased onto D numbering).  REGSET expands to entries
   0..15, REGSETH to 16..31, REGSET2 to the doubled 0..15 set.
   SPLRBANK builds the banked LR/SP/SPSR entries for one mode, in both
   cases; 768 sets the banked-register marker bits in the value.  */
17820 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
17821 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
17822 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
17823 #define REGSET(p,t) \
17824 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
17825 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
17826 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
17827 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
17828 #define REGSETH(p,t) \
17829 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
17830 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
17831 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
17832 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
17833 #define REGSET2(p,t) \
17834 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
17835 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
17836 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
17837 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
17838 #define SPLRBANK(base,bank,t) \
17839 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
17840 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
17841 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
17842 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
17843 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
17844 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
/* Master table of register names the operand parsers recognise, in both
   lower- and upper-case spellings (mixed case is generally NOT listed;
   the comment above notes the exceptions).  The number field's meaning
   depends on the register type: a plain index for core/coprocessor/VFP
   registers, or a flag-and-shifted-index encoding (512|..., 768|...,
   SPSR_BIT) for banked registers.  */
17846 static const struct reg_entry reg_names[] =
17848 /* ARM integer registers. */
17849 REGSET(r, RN), REGSET(R, RN),
17851 /* ATPCS synonyms. */
17852 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
17853 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
17854 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
17856 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
17857 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
17858 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
17860 /* Well-known aliases. */
17861 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
17862 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
17864 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
17865 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
17867 /* Coprocessor numbers. */
17868 REGSET(p, CP), REGSET(P, CP),
17870 /* Coprocessor register numbers. The "cr" variants are for backward
17872 REGSET(c, CN), REGSET(C, CN),
17873 REGSET(cr, CN), REGSET(CR, CN),
17875 /* ARM banked registers. */
17876 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
17877 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
17878 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
17879 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
17880 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
17881 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
17882 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
17884 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
17885 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
17886 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
17887 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
17888 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
17889 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
17890 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
17891 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
/* The remaining exception modes share the lr/sp/spsr pattern, so use
   the SPLRBANK helper; the first argument is each mode's bank base.  */
17893 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
17894 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
17895 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
17896 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
17897 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
17898 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
17899 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
17900 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
17901 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
17903 /* FPA registers. */
17904 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
17905 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
17907 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
17908 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
17910 /* VFP SP registers. */
17911 REGSET(s,VFS), REGSET(S,VFS),
17912 REGSETH(s,VFS), REGSETH(S,VFS),
17914 /* VFP DP Registers. */
17915 REGSET(d,VFD), REGSET(D,VFD),
17916 /* Extra Neon DP registers. */
17917 REGSETH(d,VFD), REGSETH(D,VFD),
/* Q registers use REGSET2 so q<n> encodes as 2*n.  */
17919 /* Neon QP registers. */
17920 REGSET2(q,NQ), REGSET2(Q,NQ),
17922 /* VFP control registers. */
17923 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
17924 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
17925 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
17926 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
17927 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
17928 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
17930 /* Maverick DSP coprocessor registers. */
17931 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
17932 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
17934 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
17935 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
17936 REGDEF(dspsc,0,DSPSC),
17938 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
17939 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
17940 REGDEF(DSPSC,0,DSPSC),
17942 /* iWMMXt data registers - p0, c0-15. */
17943 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
17945 /* iWMMXt control registers - p1, c0-3. */
17946 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
17947 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
17948 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
17949 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
17951 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
17952 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
17953 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
17954 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
17955 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
17957 /* XScale accumulator registers. */
17958 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
17964 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
17965 within psr_required_here. */
/* Each multi-flag combination of the f/s/x/c field letters is listed
   in every possible letter order, so any spelling is accepted (e.g.
   "fsxc" and "cxsf" denote the same mask).  */
17966 static const struct asm_psr psrs[] =
17968 /* Backward compatibility notation. Note that "all" is no longer
17969 truly all possible PSR bits. */
17970 {"all", PSR_c | PSR_f},
17974 /* Individual flags. */
17980 /* Combinations of flags. */
17981 {"fs", PSR_f | PSR_s},
17982 {"fx", PSR_f | PSR_x},
17983 {"fc", PSR_f | PSR_c},
17984 {"sf", PSR_s | PSR_f},
17985 {"sx", PSR_s | PSR_x},
17986 {"sc", PSR_s | PSR_c},
17987 {"xf", PSR_x | PSR_f},
17988 {"xs", PSR_x | PSR_s},
17989 {"xc", PSR_x | PSR_c},
17990 {"cf", PSR_c | PSR_f},
17991 {"cs", PSR_c | PSR_s},
17992 {"cx", PSR_c | PSR_x},
17993 {"fsx", PSR_f | PSR_s | PSR_x},
17994 {"fsc", PSR_f | PSR_s | PSR_c},
17995 {"fxs", PSR_f | PSR_x | PSR_s},
17996 {"fxc", PSR_f | PSR_x | PSR_c},
17997 {"fcs", PSR_f | PSR_c | PSR_s},
17998 {"fcx", PSR_f | PSR_c | PSR_x},
17999 {"sfx", PSR_s | PSR_f | PSR_x},
18000 {"sfc", PSR_s | PSR_f | PSR_c},
18001 {"sxf", PSR_s | PSR_x | PSR_f},
18002 {"sxc", PSR_s | PSR_x | PSR_c},
18003 {"scf", PSR_s | PSR_c | PSR_f},
18004 {"scx", PSR_s | PSR_c | PSR_x},
18005 {"xfs", PSR_x | PSR_f | PSR_s},
18006 {"xfc", PSR_x | PSR_f | PSR_c},
18007 {"xsf", PSR_x | PSR_s | PSR_f},
18008 {"xsc", PSR_x | PSR_s | PSR_c},
18009 {"xcf", PSR_x | PSR_c | PSR_f},
18010 {"xcs", PSR_x | PSR_c | PSR_s},
18011 {"cfs", PSR_c | PSR_f | PSR_s},
18012 {"cfx", PSR_c | PSR_f | PSR_x},
18013 {"csf", PSR_c | PSR_s | PSR_f},
18014 {"csx", PSR_c | PSR_s | PSR_x},
18015 {"cxf", PSR_c | PSR_x | PSR_f},
18016 {"cxs", PSR_c | PSR_x | PSR_s},
18017 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
18018 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
18019 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
18020 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
18021 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
18022 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
18023 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
18024 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
18025 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
18026 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
18027 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
18028 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
18029 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
18030 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
18031 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
18032 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
18033 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
18034 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
18035 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
18036 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
18037 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
18038 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
18039 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
18040 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
18043 /* Table of V7M psr names. */
/* Special-register names for M-profile MRS/MSR; the value appears to be
   the SYSm encoding (TODO confirm against the operand-encoding code).
   Upper- and lower-case spellings are listed explicitly.  */
18044 static const struct asm_psr v7m_psrs[] =
18046 {"apsr", 0 }, {"APSR", 0 },
18047 {"iapsr", 1 }, {"IAPSR", 1 },
18048 {"eapsr", 2 }, {"EAPSR", 2 },
18049 {"psr", 3 }, {"PSR", 3 },
18050 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
18051 {"ipsr", 5 }, {"IPSR", 5 },
18052 {"epsr", 6 }, {"EPSR", 6 },
18053 {"iepsr", 7 }, {"IEPSR", 7 },
18054 {"msp", 8 }, {"MSP", 8 },
18055 {"psp", 9 }, {"PSP", 9 },
18056 {"primask", 16}, {"PRIMASK", 16},
18057 {"basepri", 17}, {"BASEPRI", 17},
18058 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
/* Fix: this line previously repeated "basepri_max" (a dead duplicate of
   the entry above — presumably ignored when the table is hashed), so the
   lower-case spelling of the preserved typo was never accepted.  Spell it
   "basepri_mask" to match its upper-case twin.  */
18059 {"basepri_mask", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
18060 {"faultmask", 19}, {"FAULTMASK", 19},
18061 {"control", 20}, {"CONTROL", 20}
18064 /* Table of all shift-in-operand names. */
/* Both cases are accepted; "asl" is kept as a legacy alias that maps
   to the same operation as "lsl".  */
18065 static const struct asm_shift_name shift_names [] =
18067 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
18068 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
18069 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
18070 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
18071 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
18072 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
18075 /* Table of all explicit relocation names. */
/* Maps the name written after "(...)" in an operand (e.g. ":got:") to
   its BFD relocation code; each name is listed in lower and upper case.  */
18077 static struct reloc_entry reloc_names[] =
18079 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
18080 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
18081 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
18082 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
18083 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
18084 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
18085 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
18086 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
18087 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
18088 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
18089 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
18090 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
18091 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
18092 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
18093 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
18094 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
18095 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
18096 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
18100 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18101 static const struct asm_cond conds[] =
/* Synonym sets map to the same 4-bit condition value: cs==hs (0x2),
   cc==ul==lo (0x3).  */
18105 {"cs", 0x2}, {"hs", 0x2},
18106 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
/* Emits both the lower-case (L) and upper-case (U) spelling of one
   barrier option, sharing the same 4-bit CODE and feature gate.  */
18120 #define UL_BARRIER(L,U,CODE,FEAT) \
18121 { L, CODE, ARM_FEATURE (FEAT, 0) }, \
18122 { U, CODE, ARM_FEATURE (FEAT, 0) }
/* Names accepted as the option operand of the barrier instructions,
   with their 4-bit encodings.  The load-only variants (ld, ishld,
   nshld, oshld) are gated on ARM_EXT_V8; the rest require only
   ARM_EXT_BARRIER.  Note the legacy spellings (sh/shst, un/unst)
   alias the nsh/ish encodings.  */
18124 static struct asm_barrier_opt barrier_opt_names[] =
18126 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
18127 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
18128 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
18129 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
18130 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
18131 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
18132 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
18133 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
18134 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
18135 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
18136 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
18137 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
18138 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
18139 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
18140 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
18141 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
18146 /* Table of ARM-format instructions. */
18148 /* Macros for gluing together operand strings. N.B. In all cases
18149 other than OPS0, the trailing OP_stop comes from default
18150 zero-initialization of the unspecified elements of the array. */
/* OPSn builds the operand-type array for an n-operand mnemonic,
   prefixing each argument with OP_.  */
18151 #define OPS0() { OP_stop, }
18152 #define OPS1(a) { OP_##a, }
18153 #define OPS2(a,b) { OP_##a,OP_##b, }
18154 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
18155 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
18156 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
18157 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
18159 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
18160 This is useful when mixing operands for ARM and THUMB, i.e. using the
18161 MIX_ARM_THUMB_OPERANDS macro.
18162 In order to use these macros, prefix the number of operands with _
/* Arguments here must already be fully-spelled OP_xxx enumerators.  */
18164 #define OPS_1(a) { a, }
18165 #define OPS_2(a,b) { a,b, }
18166 #define OPS_3(a,b,c) { a,b,c, }
18167 #define OPS_4(a,b,c,d) { a,b,c,d, }
18168 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
18169 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18171 /* These macros abstract out the exact format of the mnemonic table and
18172 save some repeated characters. */
18174 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
/* MNEM is the (string) mnemonic, OP/TOP the ARM/Thumb base opcodes,
   NOPS/OPS the operand count and operand-type list (fed to OPS##nops),
   AE/TE the ARM and Thumb encoder function suffixes (do_##ae/do_##te).
   ARM_VARIANT/THUMB_VARIANT are the #define'd feature sets in force at
   the point of expansion.  */
18175 #define TxCE(mnem, op, top, nops, ops, ae, te) \
18176 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
18177 THUMB_VARIANT, do_##ae, do_##te }
18179 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
18180 a T_MNEM_xyz enumerator. */
18181 #define TCE(mnem, aop, top, nops, ops, ae, te) \
18182 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
18183 #define tCE(mnem, aop, top, nops, ops, ae, te) \
18184 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18186 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
18187 infix after the third character. */
18188 #define TxC3(mnem, op, top, nops, ops, ae, te) \
18189 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
18190 THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3 but tagged deprecated (the infix form warns).  */
18191 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
18192 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
18193 THUMB_VARIANT, do_##ae, do_##te }
18194 #define TC3(mnem, aop, top, nops, ops, ae, te) \
18195 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
18196 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
18197 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
18198 #define tC3(mnem, aop, top, nops, ops, ae, te) \
18199 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18200 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
18201 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18203 /* Mnemonic that cannot be conditionalized. The ARM condition-code
18204 field is still 0xE. Many of the Thumb variants can be executed
18205 conditionally, so this is checked separately. */
18206 #define TUE(mnem, op, top, nops, ops, ae, te) \
18207 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18208 THUMB_VARIANT, do_##ae, do_##te }
18210 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
18211 Used by mnemonics that have very minimal differences in the encoding for
18212 ARM and Thumb variants and can be handled in a common function. */
18213 #define TUEc(mnem, op, top, nops, ops, en) \
18214 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18215 THUMB_VARIANT, do_##en, do_##en }
18217 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
18218 condition code field. */
18219 #define TUF(mnem, op, top, nops, ops, ae, te) \
18220 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
18221 THUMB_VARIANT, do_##ae, do_##te }
18223 /* ARM-only variants of all the above. */
/* Note CE takes MNEM as a string while C3/UE/UF stringize a bare
   identifier (#mnem) — callers differ accordingly.  */
18224 #define CE(mnem, op, nops, ops, ae) \
18225 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18227 #define C3(mnem, op, nops, ops, ae) \
18228 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18230 /* Legacy mnemonics that always have conditional infix after the third
18232 #define CL(mnem, op, nops, ops, ae) \
18233 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18234 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18236 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
/* Thumb opcode is the ARM opcode with an 0xe (AL) leading nibble.  */
18237 #define cCE(mnem, op, nops, ops, ae) \
18238 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18240 /* Legacy coprocessor instructions where conditional infix and conditional
18241 suffix are ambiguous. For consistency this includes all FPA instructions,
18242 not just the potentially ambiguous ones. */
18243 #define cCL(mnem, op, nops, ops, ae) \
18244 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18245 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18247 /* Coprocessor, takes either a suffix or a position-3 infix
18248 (for an FPA corner case). */
18249 #define C3E(mnem, op, nops, ops, ae) \
18250 { mnem, OPS##nops ops, OT_csuf_or_in3, \
18251 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
/* One entry for the mnemonic M1+M2+M3 where M2 is a condition-code
   infix (or empty); the tag records where in the name the infix sits
   so the parser can recognise it.  */
18253 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
18254 { m1 #m2 m3, OPS##nops ops, \
18255 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
18256 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
/* Expands to one entry per condition code (plus the bare, un-infixed
   spelling) for a mnemonic whose condition sits at an odd position.  */
18258 #define CM(m1, m2, op, nops, ops, ae) \
18259 xCM_ (m1, , m2, op, nops, ops, ae), \
18260 xCM_ (m1, eq, m2, op, nops, ops, ae), \
18261 xCM_ (m1, ne, m2, op, nops, ops, ae), \
18262 xCM_ (m1, cs, m2, op, nops, ops, ae), \
18263 xCM_ (m1, hs, m2, op, nops, ops, ae), \
18264 xCM_ (m1, cc, m2, op, nops, ops, ae), \
18265 xCM_ (m1, ul, m2, op, nops, ops, ae), \
18266 xCM_ (m1, lo, m2, op, nops, ops, ae), \
18267 xCM_ (m1, mi, m2, op, nops, ops, ae), \
18268 xCM_ (m1, pl, m2, op, nops, ops, ae), \
18269 xCM_ (m1, vs, m2, op, nops, ops, ae), \
18270 xCM_ (m1, vc, m2, op, nops, ops, ae), \
18271 xCM_ (m1, hi, m2, op, nops, ops, ae), \
18272 xCM_ (m1, ls, m2, op, nops, ops, ae), \
18273 xCM_ (m1, ge, m2, op, nops, ops, ae), \
18274 xCM_ (m1, lt, m2, op, nops, ops, ae), \
18275 xCM_ (m1, gt, m2, op, nops, ops, ae), \
18276 xCM_ (m1, le, m2, op, nops, ops, ae), \
18277 xCM_ (m1, al, m2, op, nops, ops, ae)
/* ARM-only unconditional mnemonic (condition field kept at AL).  */
18279 #define UE(mnem, op, nops, ops, ae) \
18280 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
/* ARM-only unconditional mnemonic with 0xF in the condition field.  */
18282 #define UF(mnem, op, nops, ops, ae) \
18283 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18285 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
18286 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
18287 use the same encoding function for each. */
18288 #define NUF(mnem, op, nops, ops, enc) \
18289 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
18290 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
18292 /* Neon data processing, version which indirects through neon_enc_tab for
18293 the various overloaded versions of opcodes. */
18294 #define nUF(mnem, op, nops, ops, enc) \
18295 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
18296 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
18298 /* Neon insn with conditional suffix for the ARM version, non-overloaded
18300 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
18301 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
18302 THUMB_VARIANT, do_##enc, do_##enc }
18304 #define NCE(mnem, op, nops, ops, enc) \
18305 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
18307 #define NCEF(mnem, op, nops, ops, enc) \
18308 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18310 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
18311 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
18312 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
18313 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
18315 #define nCE(mnem, op, nops, ops, enc) \
18316 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
18318 #define nCEF(mnem, op, nops, ops, enc) \
18319 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18323 static const struct asm_opcode insns[] =
18325 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18326 #define THUMB_VARIANT & arm_ext_v4t
18327 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18328 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18329 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18330 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18331 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18332 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18333 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18334 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18335 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18336 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18337 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18338 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18339 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18340 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18341 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18342 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18344 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18345 for setting PSR flag bits. They are obsolete in V6 and do not
18346 have Thumb equivalents. */
18347 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18348 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18349 CL("tstp", 110f000, 2, (RR, SH), cmp),
18350 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18351 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18352 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18353 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18354 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18355 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18357 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18358 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
18359 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18360 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18362 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18363 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18364 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18366 OP_ADDRGLDR),ldst, t_ldst),
18367 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18369 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18370 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18371 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18372 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18373 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18374 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18376 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18377 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18378 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18379 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18382 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18383 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18384 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18385 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18387 /* Thumb-compatibility pseudo ops. */
18388 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18389 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18390 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18391 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18392 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18393 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18394 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18395 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18396 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18397 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18398 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18399 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18401 /* These may simplify to neg. */
18402 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18403 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18405 #undef THUMB_VARIANT
18406 #define THUMB_VARIANT & arm_ext_v6
18408 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18410 /* V1 instructions with no Thumb analogue prior to V6T2. */
18411 #undef THUMB_VARIANT
18412 #define THUMB_VARIANT & arm_ext_v6t2
18414 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18415 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18416 CL("teqp", 130f000, 2, (RR, SH), cmp),
18418 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18419 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18420 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18421 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18423 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18424 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18426 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18427 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18429 /* V1 instructions with no Thumb analogue at all. */
18430 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18431 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18433 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18434 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18435 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18436 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18437 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18438 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18439 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18440 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18443 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18444 #undef THUMB_VARIANT
18445 #define THUMB_VARIANT & arm_ext_v4t
18447 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18448 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18450 #undef THUMB_VARIANT
18451 #define THUMB_VARIANT & arm_ext_v6t2
18453 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18454 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18456 /* Generic coprocessor instructions. */
18457 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18458 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18459 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18460 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18461 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18462 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18463 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18466 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18468 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18469 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18472 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18473 #undef THUMB_VARIANT
18474 #define THUMB_VARIANT & arm_ext_msr
18476 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18477 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18480 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18481 #undef THUMB_VARIANT
18482 #define THUMB_VARIANT & arm_ext_v6t2
18484 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18485 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18486 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18487 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18488 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18489 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18490 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18491 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18494 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18495 #undef THUMB_VARIANT
18496 #define THUMB_VARIANT & arm_ext_v4t
18498 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18499 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18500 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18501 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18502 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18503 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18506 #define ARM_VARIANT & arm_ext_v4t_5
18508 /* ARM Architecture 4T. */
18509 /* Note: bx (and blx) are required on V5, even if the processor does
18510 not support Thumb. */
18511 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18514 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18515 #undef THUMB_VARIANT
18516 #define THUMB_VARIANT & arm_ext_v5t
18518 /* Note: blx has 2 variants; the .value coded here is for
18519 BLX(2). Only this variant has conditional execution. */
18520 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18521 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18523 #undef THUMB_VARIANT
18524 #define THUMB_VARIANT & arm_ext_v6t2
18526 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18527 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18528 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18529 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18530 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18531 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18532 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18533 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18536 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18537 #undef THUMB_VARIANT
18538 #define THUMB_VARIANT & arm_ext_v5exp
18540 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18541 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18542 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18543 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18545 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18546 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18548 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18549 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18550 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18551 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18553 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18554 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18555 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18556 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18558 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18559 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18561 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18562 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18563 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18564 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18567 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18568 #undef THUMB_VARIANT
18569 #define THUMB_VARIANT & arm_ext_v6t2
18571 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18572 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18574 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18575 ADDRGLDRS), ldrd, t_ldstd),
18577 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18578 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18581 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18583 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18586 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18587 #undef THUMB_VARIANT
18588 #define THUMB_VARIANT & arm_ext_v6
18590 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18591 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18592 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18593 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18594 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18595 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18596 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18597 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18598 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18599 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18601 #undef THUMB_VARIANT
18602 #define THUMB_VARIANT & arm_ext_v6t2
18604 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18605 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18607 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18608 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18610 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18611 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18613 /* ARM V6 not included in V7M. */
18614 #undef THUMB_VARIANT
18615 #define THUMB_VARIANT & arm_ext_v6_notm
 /* RFE (return from exception): restores PC and CPSR from the stack.
    "rfeia"/"rfe" (increment-after) is the canonical form; the quoted TUF
    entries have both ARM and Thumb-2 encodings, while the bare UF entries
    (rfeib/rfeda/rfefa/rfeed) are ARM-only addressing-mode variants with
    no T32 equivalent.  The fd/fa/ea/ed spellings are the stack-style
    aliases (full/empty, descending/ascending) mapping onto the same
    ia/ib/da/db encodings — note each alias shares its opcode with the
    corresponding addressing-mode entry above it.  */
18616 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18617 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18618 UF(rfeib, 9900a00, 1, (RRw), rfe),
18619 UF(rfeda, 8100a00, 1, (RRw), rfe),
18620 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
 /* Stack-style aliases: rfefd == rfeia, rfefa == rfeda,
    rfeea == rfedb, rfeed == rfeib.  */
18621 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18622 UF(rfefa, 8100a00, 1, (RRw), rfe),
18623 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18624 UF(rfeed, 9900a00, 1, (RRw), rfe),
 /* SRS (store return state): pushes LR and SPSR onto the stack of the
    mode given by the immediate.  Same pattern as RFE above: quoted TUF
    entries carry both encodings, bare UF entries are ARM-only
    addressing-mode variants, and ea/fa/ed/fd are the stack-style
    aliases sharing opcodes with ia/ib/da/db respectively.  */
18625 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18626 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18627 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18628 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18629 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
18630 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
18631 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
18632 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18633 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18635 /* ARM V6 not included in V7M (eg. integer SIMD). */
18636 #undef THUMB_VARIANT
18637 #define THUMB_VARIANT & arm_ext_v6_dsp
18638 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
18639 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
18640 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
18641 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18642 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18643 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18644 /* Old name for QASX. */
18645 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18646 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18647 /* Old name for QSAX. */
18648 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18649 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18650 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18651 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18652 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18653 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18654 /* Old name for SASX. */
18655 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18656 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18657 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18658 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18659 /* Old name for SHASX. */
18660 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18661 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18662 /* Old name for SHSAX. */
18663 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18664 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18665 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18666 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18667 /* Old name for SSAX. */
18668 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18669 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18670 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18671 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18672 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18673 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18674 /* Old name for UASX. */
18675 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18676 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18677 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18678 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18679 /* Old name for UHASX. */
18680 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18681 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18682 /* Old name for UHSAX. */
18683 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18684 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18685 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18686 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18687 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18688 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18689 /* Old name for UQASX. */
18690 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18691 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18692 /* Old name for UQSAX. */
18693 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18694 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18695 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18696 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18697 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18698 /* Old name for USAX. */
18699 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18700 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18701 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18702 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18703 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18704 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18705 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18706 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18707 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18708 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18709 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18710 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18711 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18712 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18713 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18714 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18715 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18716 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18717 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18718 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18719 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18720 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18721 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18722 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18723 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18724 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18725 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18726 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18727 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18728 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
18729 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
18730 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18731 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18732 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
18735 #define ARM_VARIANT & arm_ext_v6k
18736 #undef THUMB_VARIANT
18737 #define THUMB_VARIANT & arm_ext_v6k
18739 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
18740 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
18741 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
18742 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
18744 #undef THUMB_VARIANT
18745 #define THUMB_VARIANT & arm_ext_v6_notm
18746 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
18748 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
18749 RRnpcb), strexd, t_strexd),
18751 #undef THUMB_VARIANT
18752 #define THUMB_VARIANT & arm_ext_v6t2
18753 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
18755 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
18757 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18759 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18761 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
18764 #define ARM_VARIANT & arm_ext_sec
18765 #undef THUMB_VARIANT
18766 #define THUMB_VARIANT & arm_ext_sec
18768 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
18771 #define ARM_VARIANT & arm_ext_virt
18772 #undef THUMB_VARIANT
18773 #define THUMB_VARIANT & arm_ext_virt
18775 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
18776 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
18779 #define ARM_VARIANT & arm_ext_v6t2
18780 #undef THUMB_VARIANT
18781 #define THUMB_VARIANT & arm_ext_v6t2
18783 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
18784 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
18785 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18786 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18788 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18789 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
18790 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
18791 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
18793 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18794 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18795 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18796 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18798 /* Thumb-only instructions. */
18800 #define ARM_VARIANT NULL
18801 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
18802 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
18804 /* ARM does not really have an IT instruction, so always allow it.
18805 The opcode is copied from Thumb in order to allow warnings in
18806 -mimplicit-it=[never | arm] modes. */
18808 #define ARM_VARIANT & arm_ext_v1
18810 TUE("it", bf08, bf08, 1, (COND), it, t_it),
18811 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
18812 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
18813 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
18814 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
18815 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
18816 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
18817 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
18818 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
18819 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
18820 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
18821 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
18822 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
18823 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
18824 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
18825 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
18826 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
18827 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
18829 /* Thumb2 only instructions. */
18831 #define ARM_VARIANT NULL
18833 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18834 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18835 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
18836 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
18837 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
18838 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
18840 /* Hardware division instructions. */
18842 #define ARM_VARIANT & arm_ext_adiv
18843 #undef THUMB_VARIANT
18844 #define THUMB_VARIANT & arm_ext_div
18846 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
18847 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
18849 /* ARM V6M/V7 instructions. */
18851 #define ARM_VARIANT & arm_ext_barrier
18852 #undef THUMB_VARIANT
18853 #define THUMB_VARIANT & arm_ext_barrier
18855 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
18856 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
18857 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
18859 /* ARM V7 instructions. */
18861 #define ARM_VARIANT & arm_ext_v7
18862 #undef THUMB_VARIANT
18863 #define THUMB_VARIANT & arm_ext_v7
18865 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
18866 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
18869 #define ARM_VARIANT & arm_ext_mp
18870 #undef THUMB_VARIANT
18871 #define THUMB_VARIANT & arm_ext_mp
18873 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
18875 /* AArchv8 instructions. */
18877 #define ARM_VARIANT & arm_ext_v8
18878 #undef THUMB_VARIANT
18879 #define THUMB_VARIANT & arm_ext_v8
18881 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
18882 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
18883 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18884 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
18886 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
18887 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18888 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
18890 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
18892 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
18894 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
18896 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18897 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18898 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18899 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18900 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18901 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18903 /* ARMv8 T32 only. */
18905 #define ARM_VARIANT NULL
18906 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
18907 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
18908 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
18910 /* FP for ARMv8. */
18912 #define ARM_VARIANT & fpu_vfp_ext_armv8
18913 #undef THUMB_VARIANT
18914 #define THUMB_VARIANT & fpu_vfp_ext_armv8
18916 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
18917 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
18918 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
18919 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
18920 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18921 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18922 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
18923 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
18924 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
18925 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
18926 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
18927 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
18928 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
18929 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
18930 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
18931 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
18932 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
18934 /* Crypto v1 extensions. */
18936 #define ARM_VARIANT & fpu_crypto_ext_armv8
18937 #undef THUMB_VARIANT
18938 #define THUMB_VARIANT & fpu_crypto_ext_armv8
18940 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
18941 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
18942 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
18943 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
18944 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
18945 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
18946 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
18947 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
18948 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
18949 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
18950 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
18951 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
18952 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
18953 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
18956 #define ARM_VARIANT & crc_ext_armv8
18957 #undef THUMB_VARIANT
18958 #define THUMB_VARIANT & crc_ext_armv8
18959 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
18960 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
18961 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
18962 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
18963 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
18964 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
18967 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
18968 #undef THUMB_VARIANT
18969 #define THUMB_VARIANT NULL
18971 cCE("wfs", e200110, 1, (RR), rd),
18972 cCE("rfs", e300110, 1, (RR), rd),
18973 cCE("wfc", e400110, 1, (RR), rd),
18974 cCE("rfc", e500110, 1, (RR), rd),
18976 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
18977 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
18978 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
18979 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
18981 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
18982 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
18983 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
18984 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
18986 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
18987 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
18988 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
18989 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
18990 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
18991 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
18992 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
18993 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
18994 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
18995 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
18996 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
18997 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
18999 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19000 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19001 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19002 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19003 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19004 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19005 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19006 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19007 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19008 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19009 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19010 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19012 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19013 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19014 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19015 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19016 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19017 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19018 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19019 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19020 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19021 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19022 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19023 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19025 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19026 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19027 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19028 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19029 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19030 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19031 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19032 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19033 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19034 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19035 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19036 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19038 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19039 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19040 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19041 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19042 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19043 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19044 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19045 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19046 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19047 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19048 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19049 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19051 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19052 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19053 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19054 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19055 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19056 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19057 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19058 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19059 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19060 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19061 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19062 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19064 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19065 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19066 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19067 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19068 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19069 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19070 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19071 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19072 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19073 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19074 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19075 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19077 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19078 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19079 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19080 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19081 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19082 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19083 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19084 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19085 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19086 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19087 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19088 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
19090 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19091 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19092 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19093 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19094 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19095 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19096 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19097 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19098 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19099 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19100 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19101 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19103 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19104 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19105 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19106 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19107 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19108 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19109 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19110 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19111 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19112 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19113 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19114 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19116 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19117 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19118 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19119 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19120 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19121 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19122 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19123 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19124 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19125 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19126 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19127 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19129 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19130 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19131 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19132 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19133 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19134 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19135 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19136 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19137 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19138 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19139 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19140 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19142 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19143 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19144 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19145 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19146 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19147 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19148 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19149 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19150 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19151 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19152 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19153 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19155 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19156 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19157 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19158 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19159 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19160 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19161 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19162 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19163 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19164 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19165 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19166 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19168 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19169 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19170 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19171 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19172 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19173 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19174 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19175 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19176 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19177 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19178 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19179 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19181 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19182 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19183 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19184 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19185 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19186 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19187 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19188 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19189 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19190 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19191 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19192 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19194 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19195 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19196 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19197 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19198 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19199 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19200 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19201 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19202 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19203 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19204 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19205 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19207 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19208 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19209 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19210 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19211 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19212 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19213 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19214 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19215 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19216 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19217 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19218 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19220 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19221 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19222 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19223 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19224 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19225 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19226 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19227 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19228 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19229 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19230 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19231 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19233 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19234 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19235 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19236 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19237 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19238 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19239 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19240 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19241 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19242 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19243 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19244 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19246 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19247 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19248 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19249 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19250 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19251 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19252 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19253 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19254 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19255 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19256 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19257 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19259 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19260 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19261 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19262 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19263 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19264 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19265 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19266 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19267 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19268 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19269 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19270 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19272 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19273 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19274 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19275 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19276 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19277 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19278 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19279 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19280 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19281 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19282 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19283 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19285 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19286 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19287 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19288 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19289 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19290 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19291 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19292 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19293 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19294 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19295 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19296 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19298 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19299 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19300 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19301 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19302 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19303 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19304 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19305 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19306 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19307 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19308 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19309 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19311 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19312 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19313 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19314 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19315 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19316 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19317 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19318 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19319 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19320 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19321 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19322 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19324 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19325 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19326 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19327 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19328 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19329 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19330 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19331 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19332 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19333 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19334 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19335 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19337 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19338 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19339 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19340 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19341 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19342 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19343 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19344 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19345 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19346 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19347 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19348 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19350 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19351 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19352 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19353 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19354 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19355 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19356 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19357 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19358 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19359 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19360 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19361 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19363 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19364 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19365 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19366 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19368 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19369 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19370 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19371 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19372 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19373 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19374 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19375 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19376 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19377 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19378 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19379 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19381 /* The implementation of the FIX instruction is broken on some
19382 assemblers, in that it accepts a precision specifier as well as a
19383 rounding specifier, despite the fact that this is meaningless.
19384 To be more compatible, we accept it as well, though of course it
19385 does not set any bits. */
19386 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19387 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19388 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19389 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19390 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19391 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19392 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19393 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19394 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19395 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19396 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19397 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19398 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19400 /* Instructions that were new with the real FPA, call them V2. */
19402 #define ARM_VARIANT & fpu_fpa_ext_v2
19404 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19405 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19406 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19407 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19408 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19409 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19412 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19414 /* Moves and type conversions. */
19415 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19416 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19417 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19418 cCE("fmstat", ef1fa10, 0, (), noargs),
19419 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19420 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19421 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19422 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19423 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19424 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19425 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19426 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19427 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19428 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19430 /* Memory operations. */
19431 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19432 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19433 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19434 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19435 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19436 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19437 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19438 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19439 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19440 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19441 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19442 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19443 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19444 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19445 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19446 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19447 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19448 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19450 /* Monadic operations. */
19451 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19452 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19453 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19455 /* Dyadic operations. */
19456 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19457 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19458 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19459 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19460 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19461 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19462 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19463 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19464 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19467 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19468 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19469 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19470 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19472 /* Double precision load/store are still present on single precision
19473 implementations. */
19474 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19475 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19476 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19477 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19478 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19479 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19480 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19481 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19482 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19483 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19486 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19488 /* Moves and type conversions. */
19489 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19490 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19491 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19492 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19493 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19494 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19495 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19496 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19497 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19498 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19499 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19500 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19501 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19503 /* Monadic operations. */
19504 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19505 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19506 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19508 /* Dyadic operations. */
19509 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19510 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19511 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19512 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19513 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19514 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19515 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19516 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19517 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19520 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19521 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19522 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19523 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19526 #define ARM_VARIANT & fpu_vfp_ext_v2
19528 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19529 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19530 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19531 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19533 /* Instructions which may belong to either the Neon or VFP instruction sets.
19534 Individual encoder functions perform additional architecture checks. */
19536 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19537 #undef THUMB_VARIANT
19538 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19540 /* These mnemonics are unique to VFP. */
19541 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19542 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19543 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19544 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19545 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19546 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
19547 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
19548 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19549 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19550 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19552 /* Mnemonics shared by Neon and VFP. */
19553 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19554 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19555 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19557 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19558 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19560 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19561 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19563 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19564 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19565 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19566 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19567 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19568 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19569 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19570 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19572 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19573 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19574 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19575 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19578 /* NOTE: All VMOV encoding is special-cased! */
19579 NCE(vmov, 0, 1, (VMOV), neon_mov),
19580 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19582 #undef THUMB_VARIANT
19583 #define THUMB_VARIANT & fpu_neon_ext_v1
19585 #define ARM_VARIANT & fpu_neon_ext_v1
19587 /* Data processing with three registers of the same length. */
19588 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19589 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19590 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19591 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19592 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19593 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19594 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19595 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19596 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19597 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19598 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19599 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19600 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19601 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19602 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19603 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19604 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19605 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19606 /* If not immediate, fall back to neon_dyadic_i64_su.
19607 shl_imm should accept I8 I16 I32 I64,
19608 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19609 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19610 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
19611 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19612 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
19613 /* Logic ops, types optional & ignored. */
19614 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19615 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19616 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19617 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19618 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19619 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19620 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19621 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19622 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
19623 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
19624 /* Bitfield ops, untyped. */
19625 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19626 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19627 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19628 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19629 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19630 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19631 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
19632 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19633 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19634 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19635 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19636 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19637 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19638 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19639 back to neon_dyadic_if_su. */
19640 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19641 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19642 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19643 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19644 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19645 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19646 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19647 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19648 /* Comparison. Type I8 I16 I32 F32. */
19649 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19650 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
19651 /* As above, D registers only. */
19652 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19653 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19654 /* Int and float variants, signedness unimportant. */
19655 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19656 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19657 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
19658 /* Add/sub take types I8 I16 I32 I64 F32. */
19659 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19660 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19661 /* vtst takes sizes 8, 16, 32. */
19662 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19663 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
19664 /* VMUL takes I8 I16 I32 F32 P8. */
19665 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
19666 /* VQD{R}MULH takes S16 S32. */
19667 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19668 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19669 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19670 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19671 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19672 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19673 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19674 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19675 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19676 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19677 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19678 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19679 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19680 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19681 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19682 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19684 /* Two address, int/float. Types S8 S16 S32 F32. */
19685 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
19686 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
19688 /* Data processing with two registers and a shift amount. */
19689 /* Right shifts, and variants with rounding.
19690 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
19691 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19692 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19693 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19694 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19695 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19696 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19697 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19698 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19699 /* Shift and insert. Sizes accepted 8 16 32 64. */
19700 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19701 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
19702 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19703 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
19704 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
19705 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19706 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
19707 /* Right shift immediate, saturating & narrowing, with rounding variants.
19708 Types accepted S16 S32 S64 U16 U32 U64. */
19709 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19710 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19711 /* As above, unsigned. Types accepted S16 S32 S64. */
19712 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19713 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19714 /* Right shift narrowing. Types accepted I16 I32 I64. */
19715 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19716 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19717 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
19718 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
19719 /* CVT with optional immediate for fixed-point variant. */
19720 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
19722 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
19723 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
19725 /* Data processing, three registers of different lengths. */
19726 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
19727 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
19728 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
19729 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
19730 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
19731 /* If not scalar, fall back to neon_dyadic_long.
19732 Vector types as above, scalar types S16 S32 U16 U32. */
19733 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19734 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19735 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
19736 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19737 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19738 /* Dyadic, narrowing insns. Types I16 I32 I64. */
19739 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19740 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19741 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19742 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19743 /* Saturating doubling multiplies. Types S16 S32. */
19744 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19745 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19746 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19747 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19748 S16 S32 U16 U32. */
19749 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
19751 /* Extract. Size 8. */
19752 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
19753 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
19755 /* Two registers, miscellaneous. */
19756 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
19757 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
19758 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
19759 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
19760 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
19761 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
19762 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
19763 /* Vector replicate. Sizes 8 16 32. */
19764 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
19765 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
19766 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
19767 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
19768 /* VMOVN. Types I16 I32 I64. */
19769 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
19770 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
19771 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
19772 /* VQMOVUN. Types S16 S32 S64. */
19773 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
19774 /* VZIP / VUZP. Sizes 8 16 32. */
19775 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
19776 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
19777 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
19778 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
19779 /* VQABS / VQNEG. Types S8 S16 S32. */
19780 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19781 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
19782 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19783 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
19784 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
19785 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
19786 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
19787 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
19788 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
19789 /* Reciprocal estimates. Types U32 F32. */
19790 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
19791 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
19792 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
19793 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
19794 /* VCLS. Types S8 S16 S32. */
19795 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
19796 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
19797 /* VCLZ. Types I8 I16 I32. */
19798 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
19799 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
19800 /* VCNT. Size 8. */
19801 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
19802 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
19803 /* Two address, untyped. */
19804 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
19805 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
19806 /* VTRN. Sizes 8 16 32. */
19807 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
19808 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
19810 /* Table lookup. Size 8. */
19811 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19812 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19814 #undef THUMB_VARIANT
19815 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
19817 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
19819 /* Neon element/structure load/store. */
19820 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19821 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19822 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19823 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19824 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19825 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19826 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19827 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19829 #undef THUMB_VARIANT
19830 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
19832 #define ARM_VARIANT & fpu_vfp_ext_v3xd
19833 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
19834 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19835 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19836 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19837 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19838 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19839 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19840 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19841 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19843 #undef THUMB_VARIANT
19844 #define THUMB_VARIANT & fpu_vfp_ext_v3
19846 #define ARM_VARIANT & fpu_vfp_ext_v3
19848 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
19849 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19850 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19851 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19852 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19853 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19854 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19855 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19856 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19859 #define ARM_VARIANT & fpu_vfp_ext_fma
19860 #undef THUMB_VARIANT
19861 #define THUMB_VARIANT & fpu_vfp_ext_fma
19862 /* Mnemonics shared by Neon and VFP. These are included in the
19863 VFP FMA variant; NEON and VFP FMA always includes the NEON
19864 FMA instructions. */
19865 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19866 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19867 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
19868 the v form should always be used. */
19869 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19870 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19871 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19872 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19873 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19874 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19876 #undef THUMB_VARIANT
19878 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
19880 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19881 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19882 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19883 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19884 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19885 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19886 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
19887 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
19890 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
19892 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
19893 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
19894 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
19895 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
19896 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
19897 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
19898 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
19899 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
19900 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
19901 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19902 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19903 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19904 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19905 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19906 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19907 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19908 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19909 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19910 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
19911 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
19912 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19913 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19914 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19915 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19916 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19917 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19918 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
19919 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
19920 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
19921 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
19922 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
19923 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
19924 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
19925 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
19926 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
19927 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
19928 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
19929 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19930 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19931 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19932 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19933 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19934 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19935 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19936 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19937 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19938 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
19939 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19940 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19941 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19942 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19943 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19944 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19945 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19946 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19947 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19948 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19949 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19950 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19951 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19952 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19953 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19954 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19955 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19956 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19957 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19958 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19959 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19960 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
19961 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
19962 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19963 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19964 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19965 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19966 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19967 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19968 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19969 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19970 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19971 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19972 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19973 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19974 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19975 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19976 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19977 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19978 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19979 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19980 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
19981 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19982 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19983 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19984 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19985 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19986 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19987 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19988 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19989 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19990 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19991 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19992 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19993 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19994 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19995 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19996 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19997 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19998 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19999 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20000 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20001 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20002 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20003 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20004 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20005 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20006 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20007 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20008 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20009 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20010 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20011 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20012 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20013 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20014 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20015 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20016 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20017 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20018 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20019 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20020 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20021 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20022 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20023 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20024 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20025 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20026 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20027 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20028 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20029 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20030 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20031 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20032 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20033 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20034 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20035 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20036 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20037 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20038 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20039 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20040 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20041 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20042 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20043 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20044 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20045 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20046 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20047 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20048 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20049 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20050 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20051 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20052 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20053 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20056 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20058 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20059 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20060 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20061 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20062 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20063 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20064 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20065 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20066 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20067 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20068 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20069 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20070 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20071 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20072 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20073 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20074 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20075 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20076 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20077 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20078 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20079 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20080 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20081 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20082 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20083 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20084 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20085 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20086 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20087 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20088 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20089 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20090 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20091 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20092 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20093 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20094 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20095 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20096 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20097 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20098 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20099 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20100 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20101 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20102 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20103 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20104 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20105 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20106 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20107 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20108 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20109 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20110 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20111 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20112 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20113 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20114 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20117 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20119 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20120 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20121 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20122 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20123 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20124 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20125 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20126 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20127 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20128 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20129 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20130 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20131 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20132 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20133 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20134 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20135 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20136 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20137 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20138 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20139 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20140 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20141 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20142 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20143 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20144 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20145 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20146 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20147 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20148 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20149 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20150 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20151 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20152 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20153 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20154 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20155 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20156 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20157 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20158 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20159 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20160 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20161 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20162 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20163 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20164 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20165 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20166 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20167 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20168 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20169 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20170 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20171 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20172 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20173 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20174 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20175 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20176 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20177 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20178 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20179 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20180 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20181 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20182 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20183 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20184 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20185 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20186 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20187 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20188 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20189 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20190 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20191 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20192 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20193 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20194 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20197 #undef THUMB_VARIANT
20223 /* MD interface: bits in the object file. */
20225 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20226 for use in the a.out file, and stores them in the array pointed to by buf.
20227 This knows about the endian-ness of the target machine and does
20228 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20229 2 (short) and 4 (long) Floating numbers are put out as a series of
20230 LITTLENUMS (shorts, here at least). */
/* MD interface hook: write VAL into BUF as a stream of N bytes in the
   target's byte order, dispatching on the global target_big_endian.
   NOTE(review): this excerpt is elided -- the return-type line, braces
   and the `else' keyword are not visible here; verify against the full
   source before editing.  */
20233 md_number_to_chars (char * buf, valueT val, int n)
20235 if (target_big_endian)
20236 number_to_chars_bigendian (buf, val, n);
20238 number_to_chars_littleendian (buf, val, n);
/* Inverse of md_number_to_chars: read an N-byte integer from BUF,
   honouring target_big_endian.  Bytes are accumulated through an
   unsigned char pointer and masked with 255 to strip sign extension.
   NOTE(review): the result declaration, loop structure and return are
   elided from this excerpt -- confirm against the full source.  */
20242 md_chars_to_number (char * buf, int n)
20245 unsigned char * where = (unsigned char *) buf;
20247 if (target_big_endian)
20252 	  result |= (*where++ & 255);
20260       result |= (where[n] & 255);
20267 /* MD interface: Sections. */
20269 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20270 that an rs_machine_dependent frag may reach. */
/* Return the maximum variable size (excluding fr_fix) that an
   rs_machine_dependent frag may grow to.  Asserts the frag kind first.
   NOTE(review): the return statement(s) are elided from this excerpt;
   the comment below indicates the answer is bounded by INSN_SIZE (4).  */
20273 arm_frag_max_var (fragS *fragp)
20275 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20276 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20278 Note that we generate relaxable instructions even for cases that don't
20279 really need it, like an immediate that's a trivial constant. So we're
20280 overestimating the instruction size for some of those cases. Rather
20281 than putting more intelligence here, it would probably be better to
20282 avoid generating a relaxation frag in the first place when it can be
20283 determined up front that a short instruction will suffice. */
20285 gas_assert (fragp->fr_type == rs_machine_dependent);
20289 /* Estimate the size of a frag before relaxing. Assume everything fits in
/* MD interface hook: estimate a frag's size before relaxation.  Per the
   comment above, everything is assumed to fit initially.
   NOTE(review): the entire body is elided from this excerpt.  */
20293 md_estimate_size_before_relax (fragS * fragp,
20294 segT segtype ATTRIBUTE_UNUSED)
20300 /* Convert a machine dependent frag. */
/* Convert a relaxed machine-dependent frag into its final form: rebuild
   the (possibly widened) Thumb instruction at the frag's variable part
   and attach the matching fixup/relocation.  fr_var selects the width:
   4 means the 32-bit Thumb-2 encoding (built via THUMB_OP32 and emitted
   with put_thumb32_insn), otherwise the narrow 16-bit form is kept and
   only a reloc is chosen.  NOTE(review): this excerpt is elided -- many
   case labels, braces, `else' arms and declarations (buf, exp, fixp,
   reloc_type, pc_rel, the switch head) are not visible here.  */
20303 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
20305 unsigned long insn;
20306 unsigned long old_op;
/* Fetch the previously emitted narrow opcode so its register fields can
   be transplanted into a wide encoding.  */
20314 buf = fragp->fr_literal + fragp->fr_fix;
20316 old_op = bfd_get_16(abfd, buf);
/* Build the fixup expression: symbolic if the frag targets a symbol,
   constant otherwise; fr_offset is the addend either way.  */
20317 if (fragp->fr_symbol)
20319 exp.X_op = O_symbol;
20320 exp.X_add_symbol = fragp->fr_symbol;
20324 exp.X_op = O_constant;
20326 exp.X_add_number = fragp->fr_offset;
20327 opcode = fragp->fr_subtype;
/* Load/store group (literal-pool and SP-relative forms).  */
20330 case T_MNEM_ldr_pc:
20331 case T_MNEM_ldr_pc2:
20332 case T_MNEM_ldr_sp:
20333 case T_MNEM_str_sp:
20340 if (fragp->fr_var == 4)
20342 insn = THUMB_OP32 (opcode);
/* Narrow encodings 4xxx/9xxx keep Rd in bits 8-10; others split the
   register fields differently -- copy them into the wide encoding.  */
20343 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
20345 insn |= (old_op & 0x700) << 4;
20349 insn |= (old_op & 7) << 12;
20350 insn |= (old_op & 0x38) << 13;
20352 insn |= 0x00000c00;
20353 put_thumb32_insn (buf, insn);
20354 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
20358 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
/* Only the ldr_pc2 (adr-style) form is PC-relative.  */
20360 pc_rel = (opcode == T_MNEM_ldr_pc2);
/* NOTE(review): case label elided -- presumably the adr group.  */
20363 if (fragp->fr_var == 4)
20365 insn = THUMB_OP32 (opcode);
20366 insn |= (old_op & 0xf0) << 4;
20367 put_thumb32_insn (buf, insn);
20368 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
20372 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
20373 exp.X_add_number -= 4;
/* NOTE(review): case labels elided -- mov/movs/immediate group per the
   r0off computation below.  */
20381 if (fragp->fr_var == 4)
20383 int r0off = (opcode == T_MNEM_mov
20384 || opcode == T_MNEM_movs) ? 0 : 8;
20385 insn = THUMB_OP32 (opcode);
20386 insn = (insn & 0xe1ffffff) | 0x10000000;
20387 insn |= (old_op & 0x700) << r0off;
20388 put_thumb32_insn (buf, insn);
20389 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
20393 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
/* Unconditional branch: wide B.W gets BRANCH25, narrow B gets BRANCH12.  */
20398 if (fragp->fr_var == 4)
20400 insn = THUMB_OP32(opcode);
20401 put_thumb32_insn (buf, insn);
20402 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
20405 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
/* Conditional branch: the condition field (bits 8-11) moves up by 14.  */
20409 if (fragp->fr_var == 4)
20411 insn = THUMB_OP32(opcode);
20412 insn |= (old_op & 0xf00) << 14;
20413 put_thumb32_insn (buf, insn);
20414 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
20417 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
/* SP/PC add/subtract pseudo-mnemonics.  */
20420 case T_MNEM_add_sp:
20421 case T_MNEM_add_pc:
20422 case T_MNEM_inc_sp:
20423 case T_MNEM_dec_sp:
20424 if (fragp->fr_var == 4)
20426 /* ??? Choose between add and addw. */
20427 insn = THUMB_OP32 (opcode);
20428 insn |= (old_op & 0xf0) << 4;
20429 put_thumb32_insn (buf, insn);
20430 if (opcode == T_MNEM_add_pc)
20431 reloc_type = BFD_RELOC_ARM_T32_IMM12;
20433 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20436 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
/* NOTE(review): case labels elided -- three-operand add/sub immediate
   group, judging by the Rd/Rn field copies below.  */
20444 if (fragp->fr_var == 4)
20446 insn = THUMB_OP32 (opcode);
20447 insn |= (old_op & 0xf0) << 4;
20448 insn |= (old_op & 0xf) << 16;
20449 put_thumb32_insn (buf, insn);
/* Bit 20 set means the flag-setting form, which uses the ADD_IMM reloc.  */
20450 if (insn & (1 << 20))
20451 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20453 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
20456 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
/* Emit the fixup over the variable part and account for it in fr_fix.  */
20462 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
20463 (enum bfd_reloc_code_real) reloc_type);
20464 fixp->fx_file = fragp->fr_file;
20465 fixp->fx_line = fragp->fr_line;
20466 fragp->fr_fix += fragp->fr_var;
20469 /* Return the size of a relaxable immediate operand instruction.
20470 SHIFT and SIZE specify the form of the allowable immediate. */
/* Return the size of a relaxable immediate-operand instruction.  SIZE is
   the immediate field width in bits and SHIFT its implicit scaling; the
   narrow form is usable only if fr_offset fits the (shifted) field and
   is properly aligned.  Symbolic offsets always force the wide form.
   NOTE(review): declarations, returns and braces are elided from this
   excerpt.  */
20472 relax_immediate (fragS *fragp, int size, int shift)
20478 /* ??? Should be able to do better than this. */
20479 if (fragp->fr_symbol)
/* low = the alignment mask, mask = the representable offset range.  */
20482 low = (1 << shift) - 1;
20483 mask = (1 << (shift + size)) - (1 << shift);
20484 offset = fragp->fr_offset;
20485 /* Force misaligned offsets to 32-bit variant. */
20488 if (offset & ~mask)
20493 /* Get the address of a symbol during relaxation. */
/* Compute the address of the frag's target symbol during relaxation,
   compensating by STRETCH for frags not yet processed on this pass and
   for any alignment frags between here and the symbol.
   NOTE(review): declarations (sym, sym_frag, f, addr), several condition
   lines and the return are elided from this excerpt.  */
20495 relaxed_symbol_addr (fragS *fragp, long stretch)
20501 sym = fragp->fr_symbol;
20502 sym_frag = symbol_get_frag (sym);
20503 know (S_GET_SEGMENT (sym) != absolute_section
20504 || sym_frag == &zero_address_frag);
20505 addr = S_GET_VALUE (sym) + fragp->fr_offset;
20507 /* If frag has yet to be reached on this pass, assume it will
20508 move by STRETCH just as we did. If this is not so, it will
20509 be because some frag between grows, and that will force
20513 && sym_frag->relax_marker != fragp->relax_marker)
20517 /* Adjust stretch for any alignment frag. Note that if have
20518 been expanding the earlier code, the symbol may be
20519 defined in what appears to be an earlier frag. FIXME:
20520 This doesn't handle the fr_subtype field, which specifies
20521 a maximum number of bytes to skip when doing an
20523 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
20525 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
/* Round STRETCH toward zero to the alignment boundary; the two arms
   handle negative and non-negative stretch respectively.  */
20528 stretch = - ((- stretch)
20529 & ~ ((1 << (int) f->fr_offset) - 1));
20531 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
20543 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
/* Return the size of a relaxable adr pseudo-instruction (or PC-relative
   load).  Undefined, weak or out-of-section symbols force the wide form;
   otherwise the 4-byte-aligned PC-relative distance must land in
   [0, 1020] for the narrow encoding.
   NOTE(review): the return statements and the misalignment check's body
   are elided from this excerpt.  */
20546 relax_adr (fragS *fragp, asection *sec, long stretch)
20551 /* Assume worst case for symbols not known to be in the same section. */
20552 if (fragp->fr_symbol == NULL
20553 || !S_IS_DEFINED (fragp->fr_symbol)
20554 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20555 || S_IS_WEAK (fragp->fr_symbol))
20558 val = relaxed_symbol_addr (fragp, stretch);
20559 addr = fragp->fr_address + fragp->fr_fix;
/* Align the implicit PC base down to a word boundary (Thumb adr rule).  */
20560 addr = (addr + 4) & ~3;
20561 /* Force misaligned targets to 32-bit variant. */
20565 if (val < 0 || val > 1020)
20570 /* Return the size of a relaxable add/sub immediate instruction. */
/* Return the size of a relaxable add/sub immediate instruction.  Reads
   back the narrow opcode: when Rd == Rn (low nibbles equal) the 8-bit
   immediate form applies, otherwise only the 3-bit immediate form.
   NOTE(review): the declarations of buf and op are elided from this
   excerpt.  */
20572 relax_addsub (fragS *fragp, asection *sec)
20577 buf = fragp->fr_literal + fragp->fr_fix;
20578 op = bfd_get_16(sec->owner, buf);
20579 if ((op & 0xf) == ((op >> 4) & 0xf))
20580 return relax_immediate (fragp, 8, 0);
20582 return relax_immediate (fragp, 3, 0);
20585 /* Return TRUE iff the definition of symbol S could be pre-empted
20586 (overridden) at link or load time. */
/* Return TRUE iff the definition of symbol S could be pre-empted
   (overridden) at link or load time -- weak, or external without
   restricted ELF visibility.
   NOTE(review): the return statements and the S_IS_WEAK test line are
   elided from this excerpt.  */
20588 symbol_preemptible (symbolS *s)
20590 /* Weak symbols can always be pre-empted. */
20594 /* Non-global symbols cannot be pre-empted. */
20595 if (! S_IS_EXTERNAL (s))
20599 /* In ELF, a global symbol can be marked protected, or private. In that
20600 case it can't be pre-empted (other definitions in the same link unit
20601 would violate the ODR). */
20602 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
20606 /* Other global symbols might be pre-empted. */
20610 /* Return the size of a relaxable branch instruction. BITS is the
20611 size of the offset field in the narrow instruction. */
/* Return the size of a relaxable branch instruction.  BITS is the width
   of the offset field in the narrow encoding.  The wide form is forced
   for out-of-section, weak, ARM-state (interworking) or pre-emptible
   targets, or when the halfword-scaled displacement exceeds the signed
   BITS-bit range.
   NOTE(review): the limit computation and the return statements are
   elided from this excerpt.  */
20614 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
20620 /* Assume worst case for symbols not known to be in the same section. */
20621 if (!S_IS_DEFINED (fragp->fr_symbol)
20622 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20623 || S_IS_WEAK (fragp->fr_symbol))
20627 /* A branch to a function in ARM state will require interworking. */
20628 if (S_IS_DEFINED (fragp->fr_symbol)
20629 && ARM_IS_FUNC (fragp->fr_symbol))
20633 if (symbol_preemptible (fragp->fr_symbol))
20636 val = relaxed_symbol_addr (fragp, stretch);
/* Thumb branch offsets are relative to PC + 4.  */
20637 addr = fragp->fr_address + fragp->fr_fix + 4;
20640 /* Offset is a signed value *2 */
20642 if (val >= limit || val < -limit)
20648 /* Relax a machine dependent frag. This returns the amount by which
20649 the current size of the frag should change. */
/* Relax a machine-dependent frag: dispatch on fr_subtype (the Thumb
   mnemonic that created the frag) to the appropriate size-calculation
   helper, record the new size in fr_var, and return the growth delta.
   NOTE(review): several case labels, break statements and the
   declarations of oldsize/newsize are elided from this excerpt.  */
20652 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
20657 oldsize = fragp->fr_var;
20658 switch (fragp->fr_subtype)
20660 case T_MNEM_ldr_pc2:
20661 newsize = relax_adr (fragp, sec, stretch);
20663 case T_MNEM_ldr_pc:
20664 case T_MNEM_ldr_sp:
20665 case T_MNEM_str_sp:
20666 newsize = relax_immediate (fragp, 8, 2);
/* NOTE(review): case labels elided for the three lines below -- the
   shift arguments (2, 1, 0) suggest word/halfword/byte load-store
   groups.  */
20670 newsize = relax_immediate (fragp, 5, 2);
20674 newsize = relax_immediate (fragp, 5, 1);
20678 newsize = relax_immediate (fragp, 5, 0);
20681 newsize = relax_adr (fragp, sec, stretch);
20687 newsize = relax_immediate (fragp, 8, 0);
/* 11-bit offset: unconditional branch; 8-bit: conditional branch.  */
20690 newsize = relax_branch (fragp, sec, 11, stretch);
20693 newsize = relax_branch (fragp, sec, 8, stretch);
20695 case T_MNEM_add_sp:
20696 case T_MNEM_add_pc:
20697 newsize = relax_immediate (fragp, 8, 2);
20699 case T_MNEM_inc_sp:
20700 case T_MNEM_dec_sp:
20701 newsize = relax_immediate (fragp, 7, 2);
20707 newsize = relax_addsub (fragp, sec);
20713 fragp->fr_var = newsize;
20714 /* Freeze wide instructions that are at or before the same location as
20715 in the previous pass. This avoids infinite loops.
20716 Don't freeze them unconditionally because targets may be artificially
20717 misaligned by the expansion of preceding frags. */
20718 if (stretch <= 0 && newsize > 2)
20720 md_convert_frag (sec->owner, sec, fragp);
20724 return newsize - oldsize;
20727 /* Round up a section size to the appropriate boundary. */
/* MD interface hook: round a section size up to the appropriate
   boundary.  Only the a.out flavour needs explicit rounding here.
   NOTE(review): the size parameter line, the align declaration and the
   return statements are elided from this excerpt.  */
20730 md_section_align (segT segment ATTRIBUTE_UNUSED,
20733 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
20734 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
20736 /* For a.out, force the section size to be aligned. If we don't do
20737 this, BFD will align it for us, but it will not write out the
20738 final bytes of the section. This may be a bug in BFD, but it is
20739 easier to fix it here since that is how the other a.out targets
20743 align = bfd_get_section_alignment (stdoutput, segment);
/* Round SIZE up to a multiple of 2^align.  */
20744 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
20751 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
20752 of an rs_align_code fragment. */
20755 arm_handle_align (fragS * fragP)
/* Canned 4-byte ARM no-op encodings, indexed by
   [has arm_ext_v6k][big-endian] (see the selection below).  */
20757 static char const arm_noop[2][2][4] =
20760 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
20761 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
20764 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
20765 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
/* 2-byte Thumb no-ops, indexed by [has arm_ext_v6t2][big-endian].  */
20768 static char const thumb_noop[2][2][2] =
20771 {0xc0, 0x46}, /* LE */
20772 {0x46, 0xc0}, /* BE */
20775 {0x00, 0xbf}, /* LE */
20776 {0xbf, 0x00} /* BE */
/* 4-byte wide Thumb-2 no-op, indexed by endianness only.  */
20779 static char const wide_thumb_noop[2][4] =
20780 { /* Wide Thumb-2 */
20781 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
20782 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
20785 unsigned bytes, fix, noop_size;
20788 const char *narrow_noop = NULL;
/* Only rs_align_code frags are filled with no-ops.  */
20793 if (fragP->fr_type != rs_align_code)
/* Compute how many padding bytes are needed and where they start.  */
20796 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
20797 p = fragP->fr_literal + fragP->fr_fix;
20800 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
20801 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
/* arm_init_frag must have recorded the ARM/Thumb mode by now.  */
20803 gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
/* Thumb frag: use narrow + wide Thumb no-ops when the CPU has them.  */
20805 if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
20807 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
20809 narrow_noop = thumb_noop[1][target_big_endian];
20810 noop = wide_thumb_noop[target_big_endian];
20813 noop = thumb_noop[0][target_big_endian];
/* ARM frag: 4-byte no-ops only.  */
20821 noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
20822 [target_big_endian];
20829 fragP->fr_var = noop_size;
/* First emit zero bytes, recorded as data via a mapping symbol, so
   that the remaining padding is a multiple of the no-op size.  */
20831 if (bytes & (noop_size - 1))
20833 fix = bytes & (noop_size - 1);
20835 insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
20837 memset (p, 0, fix);
/* If an odd half-word remains, emit one narrow no-op first.  */
20844 if (bytes & noop_size)
20846 /* Insert a narrow noop. */
20847 memcpy (p, narrow_noop, noop_size);
20849 bytes -= noop_size;
20853 /* Use wide noops for the remainder */
20857 while (bytes >= noop_size)
20859 memcpy (p, noop, noop_size);
20861 bytes -= noop_size;
20865 fragP->fr_fix += fix;
20868 /* Called from md_do_align. Used to create an alignment
20869 frag in a code section. */
20872 arm_frag_align_code (int n, int max)
20876 /* We assume that there will never be a requirement
20877 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
20878 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
20883 _("alignments greater than %d bytes not supported in .text sections."),
20884 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
20885 as_fatal ("%s", err_msg);
/* Reserve worst-case room for the padding; the rs_align_code frag is
   later filled in by arm_handle_align.  */
20888 p = frag_var (rs_align_code,
20889 MAX_MEM_FOR_RS_ALIGN_CODE,
20891 (relax_substateT) max,
20898 /* Perform target specific initialisation of a frag.
20899 Note - despite the name this initialisation is not done when the frag
20900 is created, but only when its type is assigned. A frag can be created
20901 and used a long time before its type is set, so beware of assuming that
20902 this initialisation is performed first. */
20906 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
20908 /* Record whether this frag is in an ARM or a THUMB area. */
20909 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20912 #else /* OBJ_ELF is defined. */
20914 arm_init_frag (fragS * fragP, int max_chars)
20916 /* If the current ARM vs THUMB mode has not already
20917 been recorded into this frag then do so now. */
20918 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
20920 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20922 /* Record a mapping symbol for alignment frags. We will delete this
20923 later if the alignment ends up empty. */
20924 switch (fragP->fr_type)
20927 case rs_align_test:
/* Alignment padding here is treated as data.  */
20929 mapping_state_2 (MAP_DATA, max_chars);
20931 case rs_align_code:
/* Code alignment: record ARM or Thumb state to match the mode the
   padding no-ops will be emitted in.  */
20932 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
20940 /* When we change sections we need to issue a new mapping symbol. */
20943 arm_elf_change_section (void)
20945 /* Link an unlinked unwind index table section to the .text section. */
/* SHT_ARM_EXIDX sections must carry an sh_link to the code they
   describe; default it to .text if nothing set one.  */
20946 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
20947 && elf_linked_to_section (now_seg) == NULL)
20948 elf_linked_to_section (now_seg) = text_section;
/* Map a section-type name used in .section directives to an ELF
   section type: "exidx" selects the ARM unwind index table type.  */
20952 arm_elf_section_type (const char * str, size_t len)
20954 if (len == 5 && strncmp (str, "exidx", 5) == 0)
20955 return SHT_ARM_EXIDX;
20960 /* Code to deal with unwinding tables. */
20962 static void add_unwind_adjustsp (offsetT);
20964 /* Generate any deferred unwind frame offset. */
20967 flush_pending_unwind (void)
/* Clear pending_offset before emitting: add_unwind_adjustsp goes
   through add_unwind_opcode, which would otherwise re-flush the same
   pending adjustment.  */
20971 offset = unwind.pending_offset;
20972 unwind.pending_offset = 0;
20974 add_unwind_adjustsp (offset);
20977 /* Add an opcode to this list for this function. Two-byte opcodes should
20978 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
20982 add_unwind_opcode (valueT op, int length)
20984 /* Add any deferred stack adjustment. */
20985 if (unwind.pending_offset)
20986 flush_pending_unwind ();
/* Any new opcode invalidates a previously recorded sp-restore.  */
20988 unwind.sp_restored = 0;
/* Grow the opcode buffer in ARM_OPCODE_CHUNK_SIZE increments.  */
20990 if (unwind.opcode_count + length > unwind.opcode_alloc)
20992 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
20993 if (unwind.opcodes)
20994 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
20995 unwind.opcode_alloc);
20997 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
/* Store low byte first; the list is later reversed into MSB-first
   words by create_unwind_entry.  */
21002 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21004 unwind.opcode_count++;
21008 /* Add unwind opcodes to adjust the stack pointer. */
21011 add_unwind_adjustsp (offsetT offset)
/* Three encodings are used depending on magnitude; the constants
   0x204/0x104/4 reflect the bias already covered by the shorter
   forms (offsets are scaled by 4).  See the ARM EHABI for the opcode
   values (0x00-0x3f short add, 0x40-0x7f short subtract, 0xb2 long).  */
21015 if (offset > 0x200)
21017 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21022 /* Long form: 0xb2, uleb128. */
21023 /* This might not fit in a word so add the individual bytes,
21024 remembering the list is built in reverse order. */
21025 o = (valueT) ((offset - 0x204) >> 2);
21027 add_unwind_opcode (0, 1);
21029 /* Calculate the uleb128 encoding of the offset. */
21033 bytes[n] = o & 0x7f;
21039 /* Add the insn. */
21041 add_unwind_opcode (bytes[n - 1], 1);
21042 add_unwind_opcode (0xb2, 1);
21044 else if (offset > 0x100)
21046 /* Two short opcodes. */
21047 add_unwind_opcode (0x3f, 1);
21048 op = (offset - 0x104) >> 2;
21049 add_unwind_opcode (op, 1);
21051 else if (offset > 0)
21053 /* Short opcode. */
21054 op = (offset - 4) >> 2;
21055 add_unwind_opcode (op, 1);
21057 else if (offset < 0)
/* Negative adjustment: emit maximal 0x7f subtract opcodes, then one
   final subtract for the remainder (0x40 | imm).  */
21060 while (offset > 0x100)
21062 add_unwind_opcode (0x7f, 1);
21065 op = ((offset - 4) >> 2) | 0x40;
21066 add_unwind_opcode (op, 1);
21070 /* Finish the list of unwind opcodes for this function. */
21072 finish_unwind_opcodes (void)
21076 if (unwind.fp_used)
21078 /* Adjust sp as necessary. */
21079 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21080 flush_pending_unwind ();
21082 /* After restoring sp from the frame pointer. */
/* 0x90 | reg is the EHABI "set vsp = r[reg]" opcode, using the frame
   pointer register recorded by the unwind directives.  */
21083 op = 0x90 | unwind.fp_reg;
21084 add_unwind_opcode (op, 1);
/* No frame pointer: just emit any outstanding sp adjustment.  */
21087 flush_pending_unwind ();
21091 /* Start an exception table entry. If idx is nonzero this is an index table
21095 start_unwind_section (const segT text_seg, int idx)
21097 const char * text_name;
21098 const char * prefix;
21099 const char * prefix_once;
21100 const char * group_name;
21104 size_t sec_name_len;
/* Index tables go in .ARM.exidx-style sections, unwind data in
   .ARM.extab-style sections; pick the name prefix and ELF type.  */
21111 prefix = ELF_STRING_ARM_unwind;
21112 prefix_once = ELF_STRING_ARM_unwind_once;
21113 type = SHT_ARM_EXIDX;
21117 prefix = ELF_STRING_ARM_unwind_info;
21118 prefix_once = ELF_STRING_ARM_unwind_info_once;
21119 type = SHT_PROGBITS;
21122 text_name = segment_name (text_seg);
21123 if (streq (text_name, ".text"))
/* Linkonce text sections use the "once" prefix and drop their own
   .gnu.linkonce.t. prefix from the generated name.  */
21126 if (strncmp (text_name, ".gnu.linkonce.t.",
21127 strlen (".gnu.linkonce.t.")) == 0)
21129 prefix = prefix_once;
21130 text_name += strlen (".gnu.linkonce.t.");
/* Build the unwind section name: <prefix><text section name>.  */
21133 prefix_len = strlen (prefix);
21134 text_len = strlen (text_name);
21135 sec_name_len = prefix_len + text_len;
21136 sec_name = (char *) xmalloc (sec_name_len + 1);
21137 memcpy (sec_name, prefix, prefix_len);
21138 memcpy (sec_name + prefix_len, text_name, text_len);
21139 sec_name[prefix_len + text_len] = '\0';
21145 /* Handle COMDAT group. */
21146 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
21148 group_name = elf_group_name (text_seg);
21149 if (group_name == NULL)
21151 as_bad (_("Group section `%s' has no group signature"),
21152 segment_name (text_seg));
21153 ignore_rest_of_line ();
21156 flags |= SHF_GROUP;
21160 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
21162 /* Set the section link for index tables. */
21164 elf_linked_to_section (now_seg) = text_seg;
21168 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21169 personality routine data. Returns zero, or the index table value for
21170 an inline entry. */
21173 create_unwind_entry (int have_data)
21178 /* The current word of data. */
21180 /* The number of bytes left in this word. */
21183 finish_unwind_opcodes ();
21185 /* Remember the current text section. */
21186 unwind.saved_seg = now_seg;
21187 unwind.saved_subseg = now_subseg;
21189 start_unwind_section (now_seg, 0);
21191 if (unwind.personality_routine == NULL)
/* personality_index == -2 marks a .cantunwind frame, which cannot
   carry handler data.  */
21193 if (unwind.personality_index == -2)
21196 as_bad (_("handlerdata in cantunwind frame"));
21197 return 1; /* EXIDX_CANTUNWIND. */
21200 /* Use a default personality routine if none is specified. */
21201 if (unwind.personality_index == -1)
/* Routine 0 packs at most 3 opcode bytes inline; fall back to
   routine 1 when there are more.  */
21203 if (unwind.opcode_count > 3)
21204 unwind.personality_index = 1;
21206 unwind.personality_index = 0;
21209 /* Space for the personality routine entry. */
21210 if (unwind.personality_index == 0)
21212 if (unwind.opcode_count > 3)
21213 as_bad (_("too many unwind opcodes for personality routine 0"));
21217 /* All the data is inline in the index table. */
/* Pack the opcode bytes MSB-first into one word (the list was built
   in reverse, so walk it backwards).  */
21220 while (unwind.opcode_count > 0)
21222 unwind.opcode_count--;
21223 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
21227 /* Pad with "finish" opcodes. */
21229 data = (data << 8) | 0xb0;
21236 /* We get two opcodes "free" in the first word. */
21237 size = unwind.opcode_count - 2;
21241 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
21242 if (unwind.personality_index != -1)
21244 as_bad (_("attempt to recreate an unwind entry"));
21248 /* An extra byte is required for the opcode count. */
21249 size = unwind.opcode_count + 1;
/* Convert byte count to word count.  */
21252 size = (size + 3) >> 2;
21254 as_bad (_("too many unwind opcodes"));
/* Align the table entry and record its address for the index table.  */
21256 frag_align (2, 0, 0);
21257 record_alignment (now_seg, 2);
21258 unwind.table_entry = expr_build_dot ();
21260 /* Allocate the table entry. */
21261 ptr = frag_more ((size << 2) + 4);
21262 /* PR 13449: Zero the table entries in case some of them are not used. */
21263 memset (ptr, 0, (size << 2) + 4);
21264 where = frag_now_fix () - ((size << 2) + 4);
21266 switch (unwind.personality_index)
21269 /* ??? Should this be a PLT generating relocation? */
21270 /* Custom personality routine. */
21271 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
21272 BFD_RELOC_ARM_PREL31);
21277 /* Set the first byte to the number of additional words. */
21278 data = size > 0 ? size - 1 : 0;
21282 /* ABI defined personality routines. */
21284 /* Three opcodes bytes are packed into the first word. */
21291 /* The size and first two opcode bytes go in the first word. */
21292 data = ((0x80 + unwind.personality_index) << 8) | size;
21297 /* Should never happen. */
21301 /* Pack the opcodes into words (MSB first), reversing the list at the same
21303 while (unwind.opcode_count > 0)
21307 md_number_to_chars (ptr, data, 4);
21312 unwind.opcode_count--;
21314 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
21317 /* Finish off the last word. */
21320 /* Pad with "finish" opcodes. */
21322 data = (data << 8) | 0xb0;
21324 md_number_to_chars (ptr, data, 4);
21329 /* Add an empty descriptor if there is no user-specified data. */
21330 ptr = frag_more (4);
21331 md_number_to_chars (ptr, 0, 4);
21338 /* Initialize the DWARF-2 unwind information for this procedure. */
21341 tc_arm_frame_initial_instructions (void)
/* At function entry the CFA is sp with zero offset.  */
21343 cfi_add_CFA_def_cfa (REG_SP, 0);
21345 #endif /* OBJ_ELF */
21347 /* Convert REGNAME to a DWARF-2 register number. */
21350 tc_arm_regname_to_dw2regnum (char *regname)
/* Try core registers first, then fall back to VFP single and double
   precision register names.  */
21352 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
21356 /* PR 16694: Allow VFP registers as well. */
21357 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
21361 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
/* Emit a PE section-relative (secrel) offset for SYMBOL, SIZE bytes
   wide, into the current frag.  */
21370 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21374 exp.X_op = O_secrel;
21375 exp.X_add_symbol = symbol;
21376 exp.X_add_number = 0;
21377 emit_expr (&exp, size);
21381 /* MD interface: Symbol and relocation handling. */
21383 /* Return the address within the segment that a PC-relative fixup is
21384 relative to. For ARM, PC-relative fixups applied to instructions
21385 are generally relative to the location of the fixup plus 8 bytes.
21386 Thumb branches are offset by 4, and Thumb loads relative to PC
21387 require special handling. */
21390 md_pcrel_from_section (fixS * fixP, segT seg)
/* Base address of the fixup itself; the pipeline bias is added per
   relocation type below.  */
21392 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
21394 /* If this is pc-relative and we are going to emit a relocation
21395 then we just want to put out any pipeline compensation that the linker
21396 will need. Otherwise we want to use the calculated base.
21397 For WinCE we skip the bias for externals as well, since this
21398 is how the MS ARM-CE assembler behaves and we want to be compatible. */
21400 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
21401 || (arm_force_relocation (fixP)
21403 && !S_IS_EXTERNAL (fixP->fx_addsy)
21409 switch (fixP->fx_r_type)
21411 /* PC relative addressing on the Thumb is slightly odd as the
21412 bottom two bits of the PC are forced to zero for the
21413 calculation. This happens *after* application of the
21414 pipeline offset. However, Thumb adrl already adjusts for
21415 this, so we need not do it again. */
21416 case BFD_RELOC_ARM_THUMB_ADD:
21419 case BFD_RELOC_ARM_THUMB_OFFSET:
21420 case BFD_RELOC_ARM_T32_OFFSET_IMM:
21421 case BFD_RELOC_ARM_T32_ADD_PC12:
21422 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
21423 return (base + 4) & ~3;
21425 /* Thumb branches are simply offset by +4. */
21426 case BFD_RELOC_THUMB_PCREL_BRANCH7:
21427 case BFD_RELOC_THUMB_PCREL_BRANCH9:
21428 case BFD_RELOC_THUMB_PCREL_BRANCH12:
21429 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21430 case BFD_RELOC_THUMB_PCREL_BRANCH25:
/* BL to a local, same-section ARM function on v5T+ may be turned
   into BLX elsewhere; use the real fixup address as the base.  */
21433 case BFD_RELOC_THUMB_PCREL_BRANCH23:
21435 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21436 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21437 && ARM_IS_FUNC (fixP->fx_addsy)
21438 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21439 base = fixP->fx_where + fixP->fx_frag->fr_address;
21442 /* BLX is like branches above, but forces the low two bits of PC to
21444 case BFD_RELOC_THUMB_PCREL_BLX:
21446 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21447 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21448 && THUMB_IS_FUNC (fixP->fx_addsy)
21449 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21450 base = fixP->fx_where + fixP->fx_frag->fr_address;
21451 return (base + 4) & ~3;
21453 /* ARM mode branches are offset by +8. However, the Windows CE
21454 loader expects the relocation not to take this into account. */
21455 case BFD_RELOC_ARM_PCREL_BLX:
21457 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21458 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21459 && ARM_IS_FUNC (fixP->fx_addsy)
21460 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21461 base = fixP->fx_where + fixP->fx_frag->fr_address;
21464 case BFD_RELOC_ARM_PCREL_CALL:
21466 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21467 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21468 && THUMB_IS_FUNC (fixP->fx_addsy)
21469 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21470 base = fixP->fx_where + fixP->fx_frag->fr_address;
21473 case BFD_RELOC_ARM_PCREL_BRANCH:
21474 case BFD_RELOC_ARM_PCREL_JUMP:
21475 case BFD_RELOC_ARM_PLT32:
21477 /* When handling fixups immediately, because we have already
21478 discovered the value of a symbol, or the address of the frag involved
21479 we must account for the offset by +8, as the OS loader will never see the reloc.
21480 see fixup_segment() in write.c
21481 The S_IS_EXTERNAL test handles the case of global symbols.
21482 Those need the calculated base, not just the pipe compensation the linker will need. */
21484 && fixP->fx_addsy != NULL
21485 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21486 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
21494 /* ARM mode loads relative to PC are also offset by +8. Unlike
21495 branches, the Windows CE loader *does* expect the relocation
21496 to take this into account. */
21497 case BFD_RELOC_ARM_OFFSET_IMM:
21498 case BFD_RELOC_ARM_OFFSET_IMM8:
21499 case BFD_RELOC_ARM_HWLITERAL:
21500 case BFD_RELOC_ARM_LITERAL:
21501 case BFD_RELOC_ARM_CP_OFF_IMM:
21505 /* Other PC-relative relocations are un-offset. */
21511 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21512 Otherwise we have no need to default values of symbols. */
21515 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
/* Cheap two-character check before the full string comparison.  */
21518 if (name[0] == '_' && name[1] == 'G'
21519 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21523 if (symbol_find (name))
21524 as_bad (_("GOT already in the symbol table"));
/* Create the GOT symbol lazily, in the undefined section, so the
   linker can resolve it later.  */
21526 GOT_symbol = symbol_new (name, undefined_section,
21527 (valueT) 0, & zero_address_frag);
21537 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21538 computed as two separate immediate values, added together. We
21539 already know that this value cannot be computed by just one ARM
21542 static unsigned int
21543 validate_immediate_twopart (unsigned int val,
21544 unsigned int * highpart)
/* Search even rotations for one whose low byte is nonzero; the low
   byte plus rotation forms the first immediate, the remaining byte
   (stored via *highpart as imm | rotation<<7) forms the second.  */
21549 for (i = 0; i < 32; i += 2)
21550 if (((a = rotate_left (val, i)) & 0xff) != 0)
21556 * highpart = (a >> 8) | ((i + 24) << 7);
21558 else if (a & 0xff0000)
21560 if (a & 0xff000000)
21562 * highpart = (a >> 16) | ((i + 16) << 7);
21566 gas_assert (a & 0xff000000);
21567 * highpart = (a >> 24) | ((i + 8) << 7);
/* Return the low part: 8-bit immediate with its rotation field.  */
21570 return (a & 0xff) | (i << 7);
/* Validate a load/store offset immediate: halfword-style encodings
   (HWSE nonzero) allow 0-255, others allow 0-4095.  */
21577 validate_offset_imm (unsigned int val, int hwse)
21579 if ((hwse && val > 255) || val > 4095)
21584 /* Subroutine of md_apply_fix. Do those data_ops which can take a
21585 negative immediate constant by altering the instruction. A bit of
21590 by inverting the second operand, and
21593 by negating the second operand. */
21596 negate_data_op (unsigned long * instruction,
21597 unsigned long value)
21600 unsigned long negated, inverted;
/* Pre-compute both candidate encodings; whichever the chosen opcode
   pair needs is checked for FAIL below.  */
21602 negated = encode_arm_immediate (-value);
21603 inverted = encode_arm_immediate (~value);
/* Extract the data-processing opcode field.  */
21605 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21608 /* First negates. */
21609 case OPCODE_SUB: /* ADD <-> SUB */
21610 new_inst = OPCODE_ADD;
21615 new_inst = OPCODE_SUB;
21619 case OPCODE_CMP: /* CMP <-> CMN */
21620 new_inst = OPCODE_CMN;
21625 new_inst = OPCODE_CMP;
21629 /* Now Inverted ops. */
21630 case OPCODE_MOV: /* MOV <-> MVN */
21631 new_inst = OPCODE_MVN;
21636 new_inst = OPCODE_MOV;
21640 case OPCODE_AND: /* AND <-> BIC */
21641 new_inst = OPCODE_BIC;
21646 new_inst = OPCODE_AND;
21650 case OPCODE_ADC: /* ADC <-> SBC */
21651 new_inst = OPCODE_SBC;
21656 new_inst = OPCODE_ADC;
21660 /* We cannot do anything. */
21665 if (value == (unsigned) FAIL)
/* Patch the opcode field in place and return the new immediate.  */
21668 *instruction &= OPCODE_MASK;
21669 *instruction |= new_inst << DATA_OP_SHIFT;
21673 /* Like negate_data_op, but for Thumb-2. */
21675 static unsigned int
21676 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
21680 unsigned int negated, inverted;
/* Pre-compute both candidate Thumb-2 immediate encodings.  */
21682 negated = encode_thumb32_immediate (-value);
21683 inverted = encode_thumb32_immediate (~value);
21685 rd = (*instruction >> 8) & 0xf;
21686 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
21689 /* ADD <-> SUB. Includes CMP <-> CMN. */
21690 case T2_OPCODE_SUB:
21691 new_inst = T2_OPCODE_ADD;
21695 case T2_OPCODE_ADD:
21696 new_inst = T2_OPCODE_SUB;
21700 /* ORR <-> ORN. Includes MOV <-> MVN. */
21701 case T2_OPCODE_ORR:
21702 new_inst = T2_OPCODE_ORN;
21706 case T2_OPCODE_ORN:
21707 new_inst = T2_OPCODE_ORR;
21711 /* AND <-> BIC. TST has no inverted equivalent. */
21712 case T2_OPCODE_AND:
21713 new_inst = T2_OPCODE_BIC;
21720 case T2_OPCODE_BIC:
21721 new_inst = T2_OPCODE_AND;
21726 case T2_OPCODE_ADC:
21727 new_inst = T2_OPCODE_SBC;
21731 case T2_OPCODE_SBC:
21732 new_inst = T2_OPCODE_ADC;
21736 /* We cannot do anything. */
21741 if (value == (unsigned int)FAIL)
/* Patch the opcode field in place and return the new immediate.  */
21744 *instruction &= T2_OPCODE_MASK;
21745 *instruction |= new_inst << T2_DATA_OP_SHIFT;
21749 /* Read a 32-bit thumb instruction from buf. */
21750 static unsigned long
21751 get_thumb32_insn (char * buf)
21753 unsigned long insn;
/* A 32-bit Thumb instruction is stored as two halfwords; the first
   halfword forms the most significant 16 bits of the result.  */
21754 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
21755 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21761 /* We usually want to set the low bit on the address of thumb function
21762 symbols. In particular .word foo - . should have the low bit set.
21763 Generic code tries to fold the difference of two symbols to
21764 a constant. Prevent this and force a relocation when the first symbol
21765 is a thumb function. */
21768 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
21770 if (op == O_subtract
21771 && l->X_op == O_symbol
21772 && r->X_op == O_symbol
21773 && THUMB_IS_FUNC (l->X_add_symbol))
/* Keep the expression as symbol-minus-symbol instead of letting the
   generic code fold it to a constant.  */
21775 l->X_op = O_subtract;
21776 l->X_op_symbol = r->X_add_symbol;
21777 l->X_add_number -= r->X_add_number;
21781 /* Process as normal. */
21785 /* Encode Thumb2 unconditional branches and calls. The encoding
21786 for the 2 are identical for the immediate values. */
21789 encode_thumb2_b_bl_offset (char * buf, offsetT value)
/* Positions of the J1/J2 bits in the second halfword.  */
21791 #define T2I1I2MASK ((1 << 13) | (1 << 11))
21794 addressT S, I1, I2, lo, hi;
/* Split the (pre-shifted, halfword-aligned) offset into the T32
   branch immediate fields: sign S, I1/I2, imm10 (hi), imm11 (lo).  */
21796 S = (value >> 24) & 0x01;
21797 I1 = (value >> 23) & 0x01;
21798 I2 = (value >> 22) & 0x01;
21799 hi = (value >> 12) & 0x3ff;
21800 lo = (value >> 1) & 0x7ff;
21801 newval = md_chars_to_number (buf, THUMB_SIZE);
21802 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21803 newval |= (S << 10) | hi;
21804 newval2 &= ~T2I1I2MASK;
/* The encoded J1/J2 bits are NOT(I1 EOR S) / NOT(I2 EOR S); the
   final XOR with the mask performs the inversion.  */
21805 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
21806 md_number_to_chars (buf, newval, THUMB_SIZE);
21807 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21811 md_apply_fix (fixS * fixP,
21815 offsetT value = * valP;
21817 unsigned int newimm;
21818 unsigned long temp;
21820 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
21822 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
21824 /* Note whether this will delete the relocation. */
21826 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
21829 /* On a 64-bit host, silently truncate 'value' to 32 bits for
21830 consistency with the behaviour on 32-bit hosts. Remember value
21832 value &= 0xffffffff;
21833 value ^= 0x80000000;
21834 value -= 0x80000000;
21837 fixP->fx_addnumber = value;
21839 /* Same treatment for fixP->fx_offset. */
21840 fixP->fx_offset &= 0xffffffff;
21841 fixP->fx_offset ^= 0x80000000;
21842 fixP->fx_offset -= 0x80000000;
21844 switch (fixP->fx_r_type)
21846 case BFD_RELOC_NONE:
21847 /* This will need to go in the object file. */
21851 case BFD_RELOC_ARM_IMMEDIATE:
21852 /* We claim that this fixup has been processed here,
21853 even if in fact we generate an error because we do
21854 not have a reloc for it, so tc_gen_reloc will reject it. */
21857 if (fixP->fx_addsy)
21859 const char *msg = 0;
21861 if (! S_IS_DEFINED (fixP->fx_addsy))
21862 msg = _("undefined symbol %s used as an immediate value");
21863 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21864 msg = _("symbol %s is in a different section");
21865 else if (S_IS_WEAK (fixP->fx_addsy))
21866 msg = _("symbol %s is weak and may be overridden later");
21870 as_bad_where (fixP->fx_file, fixP->fx_line,
21871 msg, S_GET_NAME (fixP->fx_addsy));
21876 temp = md_chars_to_number (buf, INSN_SIZE);
21878 /* If the offset is negative, we should use encoding A2 for ADR. */
21879 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
21880 newimm = negate_data_op (&temp, value);
21883 newimm = encode_arm_immediate (value);
21885 /* If the instruction will fail, see if we can fix things up by
21886 changing the opcode. */
21887 if (newimm == (unsigned int) FAIL)
21888 newimm = negate_data_op (&temp, value);
21891 if (newimm == (unsigned int) FAIL)
21893 as_bad_where (fixP->fx_file, fixP->fx_line,
21894 _("invalid constant (%lx) after fixup"),
21895 (unsigned long) value);
21899 newimm |= (temp & 0xfffff000);
21900 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21903 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21905 unsigned int highpart = 0;
21906 unsigned int newinsn = 0xe1a00000; /* nop. */
21908 if (fixP->fx_addsy)
21910 const char *msg = 0;
21912 if (! S_IS_DEFINED (fixP->fx_addsy))
21913 msg = _("undefined symbol %s used as an immediate value");
21914 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21915 msg = _("symbol %s is in a different section");
21916 else if (S_IS_WEAK (fixP->fx_addsy))
21917 msg = _("symbol %s is weak and may be overridden later");
21921 as_bad_where (fixP->fx_file, fixP->fx_line,
21922 msg, S_GET_NAME (fixP->fx_addsy));
21927 newimm = encode_arm_immediate (value);
21928 temp = md_chars_to_number (buf, INSN_SIZE);
21930 /* If the instruction will fail, see if we can fix things up by
21931 changing the opcode. */
21932 if (newimm == (unsigned int) FAIL
21933 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
21935 /* No ? OK - try using two ADD instructions to generate
21937 newimm = validate_immediate_twopart (value, & highpart);
21939 /* Yes - then make sure that the second instruction is
21941 if (newimm != (unsigned int) FAIL)
21943 /* Still No ? Try using a negated value. */
21944 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
21945 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
21946 /* Otherwise - give up. */
21949 as_bad_where (fixP->fx_file, fixP->fx_line,
21950 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
21955 /* Replace the first operand in the 2nd instruction (which
21956 is the PC) with the destination register. We have
21957 already added in the PC in the first instruction and we
21958 do not want to do it again. */
21959 newinsn &= ~ 0xf0000;
21960 newinsn |= ((newinsn & 0x0f000) << 4);
21963 newimm |= (temp & 0xfffff000);
21964 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21966 highpart |= (newinsn & 0xfffff000);
21967 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
21971 case BFD_RELOC_ARM_OFFSET_IMM:
21972 if (!fixP->fx_done && seg->use_rela_p)
21975 case BFD_RELOC_ARM_LITERAL:
21981 if (validate_offset_imm (value, 0) == FAIL)
21983 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
21984 as_bad_where (fixP->fx_file, fixP->fx_line,
21985 _("invalid literal constant: pool needs to be closer"));
21987 as_bad_where (fixP->fx_file, fixP->fx_line,
21988 _("bad immediate value for offset (%ld)"),
21993 newval = md_chars_to_number (buf, INSN_SIZE);
21995 newval &= 0xfffff000;
21998 newval &= 0xff7ff000;
21999 newval |= value | (sign ? INDEX_UP : 0);
22001 md_number_to_chars (buf, newval, INSN_SIZE);
22004 case BFD_RELOC_ARM_OFFSET_IMM8:
22005 case BFD_RELOC_ARM_HWLITERAL:
22011 if (validate_offset_imm (value, 1) == FAIL)
22013 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22014 as_bad_where (fixP->fx_file, fixP->fx_line,
22015 _("invalid literal constant: pool needs to be closer"));
22017 as_bad_where (fixP->fx_file, fixP->fx_line,
22018 _("bad immediate value for 8-bit offset (%ld)"),
22023 newval = md_chars_to_number (buf, INSN_SIZE);
22025 newval &= 0xfffff0f0;
22028 newval &= 0xff7ff0f0;
22029 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22031 md_number_to_chars (buf, newval, INSN_SIZE);
22034 case BFD_RELOC_ARM_T32_OFFSET_U8:
22035 if (value < 0 || value > 1020 || value % 4 != 0)
22036 as_bad_where (fixP->fx_file, fixP->fx_line,
22037 _("bad immediate value for offset (%ld)"), (long) value);
22040 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22042 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22045 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22046 /* This is a complicated relocation used for all varieties of Thumb32
22047 load/store instruction with immediate offset:
22049 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22050 *4, optional writeback(W)
22051 (doubleword load/store)
22053 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22054 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22055 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22056 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22057 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22059 Uppercase letters indicate bits that are already encoded at
22060 this point. Lowercase letters are our problem. For the
22061 second block of instructions, the secondary opcode nybble
22062 (bits 8..11) is present, and bit 23 is zero, even if this is
22063 a PC-relative operation. */
22064 newval = md_chars_to_number (buf, THUMB_SIZE);
22066 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22068 if ((newval & 0xf0000000) == 0xe0000000)
22070 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22072 newval |= (1 << 23);
22075 if (value % 4 != 0)
22077 as_bad_where (fixP->fx_file, fixP->fx_line,
22078 _("offset not a multiple of 4"));
22084 as_bad_where (fixP->fx_file, fixP->fx_line,
22085 _("offset out of range"));
22090 else if ((newval & 0x000f0000) == 0x000f0000)
22092 /* PC-relative, 12-bit offset. */
22094 newval |= (1 << 23);
22099 as_bad_where (fixP->fx_file, fixP->fx_line,
22100 _("offset out of range"));
22105 else if ((newval & 0x00000100) == 0x00000100)
22107 /* Writeback: 8-bit, +/- offset. */
22109 newval |= (1 << 9);
22114 as_bad_where (fixP->fx_file, fixP->fx_line,
22115 _("offset out of range"));
22120 else if ((newval & 0x00000f00) == 0x00000e00)
22122 /* T-instruction: positive 8-bit offset. */
22123 if (value < 0 || value > 0xff)
22125 as_bad_where (fixP->fx_file, fixP->fx_line,
22126 _("offset out of range"));
22134 /* Positive 12-bit or negative 8-bit offset. */
22138 newval |= (1 << 23);
22148 as_bad_where (fixP->fx_file, fixP->fx_line,
22149 _("offset out of range"));
22156 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22157 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22160 case BFD_RELOC_ARM_SHIFT_IMM:
22161 newval = md_chars_to_number (buf, INSN_SIZE);
22162 if (((unsigned long) value) > 32
22164 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22166 as_bad_where (fixP->fx_file, fixP->fx_line,
22167 _("shift expression is too large"));
22172 /* Shifts of zero must be done as lsl. */
22174 else if (value == 32)
22176 newval &= 0xfffff07f;
22177 newval |= (value & 0x1f) << 7;
22178 md_number_to_chars (buf, newval, INSN_SIZE);
22181 case BFD_RELOC_ARM_T32_IMMEDIATE:
22182 case BFD_RELOC_ARM_T32_ADD_IMM:
22183 case BFD_RELOC_ARM_T32_IMM12:
22184 case BFD_RELOC_ARM_T32_ADD_PC12:
22185 /* We claim that this fixup has been processed here,
22186 even if in fact we generate an error because we do
22187 not have a reloc for it, so tc_gen_reloc will reject it. */
22191 && ! S_IS_DEFINED (fixP->fx_addsy))
22193 as_bad_where (fixP->fx_file, fixP->fx_line,
22194 _("undefined symbol %s used as an immediate value"),
22195 S_GET_NAME (fixP->fx_addsy));
22199 newval = md_chars_to_number (buf, THUMB_SIZE);
22201 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
22204 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22205 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22207 newimm = encode_thumb32_immediate (value);
22208 if (newimm == (unsigned int) FAIL)
22209 newimm = thumb32_negate_data_op (&newval, value);
22211 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
22212 && newimm == (unsigned int) FAIL)
22214 /* Turn add/sub into addw/subw. */
22215 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22216 newval = (newval & 0xfeffffff) | 0x02000000;
22217 /* No flat 12-bit imm encoding for addsw/subsw. */
22218 if ((newval & 0x00100000) == 0)
22220 /* 12 bit immediate for addw/subw. */
22224 newval ^= 0x00a00000;
22227 newimm = (unsigned int) FAIL;
22233 if (newimm == (unsigned int)FAIL)
22235 as_bad_where (fixP->fx_file, fixP->fx_line,
22236 _("invalid constant (%lx) after fixup"),
22237 (unsigned long) value);
22241 newval |= (newimm & 0x800) << 15;
22242 newval |= (newimm & 0x700) << 4;
22243 newval |= (newimm & 0x0ff);
22245 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
22246 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
22249 case BFD_RELOC_ARM_SMC:
22250 if (((unsigned long) value) > 0xffff)
22251 as_bad_where (fixP->fx_file, fixP->fx_line,
22252 _("invalid smc expression"));
22253 newval = md_chars_to_number (buf, INSN_SIZE);
22254 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22255 md_number_to_chars (buf, newval, INSN_SIZE);
22258 case BFD_RELOC_ARM_HVC:
22259 if (((unsigned long) value) > 0xffff)
22260 as_bad_where (fixP->fx_file, fixP->fx_line,
22261 _("invalid hvc expression"));
22262 newval = md_chars_to_number (buf, INSN_SIZE);
22263 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22264 md_number_to_chars (buf, newval, INSN_SIZE);
22267 case BFD_RELOC_ARM_SWI:
22268 if (fixP->tc_fix_data != 0)
22270 if (((unsigned long) value) > 0xff)
22271 as_bad_where (fixP->fx_file, fixP->fx_line,
22272 _("invalid swi expression"));
22273 newval = md_chars_to_number (buf, THUMB_SIZE);
22275 md_number_to_chars (buf, newval, THUMB_SIZE);
22279 if (((unsigned long) value) > 0x00ffffff)
22280 as_bad_where (fixP->fx_file, fixP->fx_line,
22281 _("invalid swi expression"));
22282 newval = md_chars_to_number (buf, INSN_SIZE);
22284 md_number_to_chars (buf, newval, INSN_SIZE);
22288 case BFD_RELOC_ARM_MULTI:
22289 if (((unsigned long) value) > 0xffff)
22290 as_bad_where (fixP->fx_file, fixP->fx_line,
22291 _("invalid expression in load/store multiple"));
22292 newval = value | md_chars_to_number (buf, INSN_SIZE);
22293 md_number_to_chars (buf, newval, INSN_SIZE);
22297 case BFD_RELOC_ARM_PCREL_CALL:
22299 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22301 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22302 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22303 && THUMB_IS_FUNC (fixP->fx_addsy))
22304 /* Flip the bl to blx. This is a simple flip
22305 bit here because we generate PCREL_CALL for
22306 unconditional bls. */
22308 newval = md_chars_to_number (buf, INSN_SIZE);
22309 newval = newval | 0x10000000;
22310 md_number_to_chars (buf, newval, INSN_SIZE);
22316 goto arm_branch_common;
22318 case BFD_RELOC_ARM_PCREL_JUMP:
22319 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22321 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22322 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22323 && THUMB_IS_FUNC (fixP->fx_addsy))
22325 /* This would map to a bl<cond>, b<cond>,
22326 b<always> to a Thumb function. We
22327 need to force a relocation for this particular
22329 newval = md_chars_to_number (buf, INSN_SIZE);
22333 case BFD_RELOC_ARM_PLT32:
22335 case BFD_RELOC_ARM_PCREL_BRANCH:
22337 goto arm_branch_common;
22339 case BFD_RELOC_ARM_PCREL_BLX:
22342 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22344 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22345 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22346 && ARM_IS_FUNC (fixP->fx_addsy))
22348 /* Flip the blx to a bl and warn. */
22349 const char *name = S_GET_NAME (fixP->fx_addsy);
22350 newval = 0xeb000000;
22351 as_warn_where (fixP->fx_file, fixP->fx_line,
22352 _("blx to '%s' an ARM ISA state function changed to bl"),
22354 md_number_to_chars (buf, newval, INSN_SIZE);
22360 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22361 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
22365 /* We are going to store value (shifted right by two) in the
22366 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22367 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22368 also be clear. */
22370 as_bad_where (fixP->fx_file, fixP->fx_line,
22371 _("misaligned branch destination"));
22372 if ((value & (offsetT)0xfe000000) != (offsetT)0
22373 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22374 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22376 if (fixP->fx_done || !seg->use_rela_p)
22378 newval = md_chars_to_number (buf, INSN_SIZE);
22379 newval |= (value >> 2) & 0x00ffffff;
22380 /* Set the H bit on BLX instructions. */
22384 newval |= 0x01000000;
22386 newval &= ~0x01000000;
22388 md_number_to_chars (buf, newval, INSN_SIZE);
22392 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22393 /* CBZ can only branch forward. */
22395 /* Attempts to use CBZ to branch to the next instruction
22396 (which, strictly speaking, are prohibited) will be turned into
22399 FIXME: It may be better to remove the instruction completely and
22400 perform relaxation. */
22403 newval = md_chars_to_number (buf, THUMB_SIZE);
22404 newval = 0xbf00; /* NOP encoding T1 */
22405 md_number_to_chars (buf, newval, THUMB_SIZE);
22410 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22412 if (fixP->fx_done || !seg->use_rela_p)
22414 newval = md_chars_to_number (buf, THUMB_SIZE);
22415 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22416 md_number_to_chars (buf, newval, THUMB_SIZE);
22421 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
22422 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22423 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22425 if (fixP->fx_done || !seg->use_rela_p)
22427 newval = md_chars_to_number (buf, THUMB_SIZE);
22428 newval |= (value & 0x1ff) >> 1;
22429 md_number_to_chars (buf, newval, THUMB_SIZE);
22433 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
22434 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22435 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22437 if (fixP->fx_done || !seg->use_rela_p)
22439 newval = md_chars_to_number (buf, THUMB_SIZE);
22440 newval |= (value & 0xfff) >> 1;
22441 md_number_to_chars (buf, newval, THUMB_SIZE);
22445 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22447 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22448 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22449 && ARM_IS_FUNC (fixP->fx_addsy)
22450 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22452 /* Force a relocation for a branch 20 bits wide. */
22455 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22456 as_bad_where (fixP->fx_file, fixP->fx_line,
22457 _("conditional branch out of range"));
22459 if (fixP->fx_done || !seg->use_rela_p)
22462 addressT S, J1, J2, lo, hi;
22464 S = (value & 0x00100000) >> 20;
22465 J2 = (value & 0x00080000) >> 19;
22466 J1 = (value & 0x00040000) >> 18;
22467 hi = (value & 0x0003f000) >> 12;
22468 lo = (value & 0x00000ffe) >> 1;
22470 newval = md_chars_to_number (buf, THUMB_SIZE);
22471 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22472 newval |= (S << 10) | hi;
22473 newval2 |= (J1 << 13) | (J2 << 11) | lo;
22474 md_number_to_chars (buf, newval, THUMB_SIZE);
22475 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22479 case BFD_RELOC_THUMB_PCREL_BLX:
22480 /* If there is a blx from a thumb state function to
22481 another thumb function flip this to a bl and warn
22485 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22486 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22487 && THUMB_IS_FUNC (fixP->fx_addsy))
22489 const char *name = S_GET_NAME (fixP->fx_addsy);
22490 as_warn_where (fixP->fx_file, fixP->fx_line,
22491 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22493 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22494 newval = newval | 0x1000;
22495 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22496 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22501 goto thumb_bl_common;
22503 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22504 /* A bl from Thumb state ISA to an internal ARM state function
22505 is converted to a blx. */
22507 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22508 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22509 && ARM_IS_FUNC (fixP->fx_addsy)
22510 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22512 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22513 newval = newval & ~0x1000;
22514 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22515 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22521 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22522 /* For a BLX instruction, make sure that the relocation is rounded up
22523 to a word boundary. This follows the semantics of the instruction
22524 which specifies that bit 1 of the target address will come from bit
22525 1 of the base address. */
22526 value = (value + 3) & ~ 3;
22529 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22530 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22531 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22534 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22536 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22537 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22538 else if ((value & ~0x1ffffff)
22539 && ((value & ~0x1ffffff) != ~0x1ffffff))
22540 as_bad_where (fixP->fx_file, fixP->fx_line,
22541 _("Thumb2 branch out of range"));
22544 if (fixP->fx_done || !seg->use_rela_p)
22545 encode_thumb2_b_bl_offset (buf, value);
22549 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22550 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22551 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22553 if (fixP->fx_done || !seg->use_rela_p)
22554 encode_thumb2_b_bl_offset (buf, value);
22559 if (fixP->fx_done || !seg->use_rela_p)
22564 if (fixP->fx_done || !seg->use_rela_p)
22565 md_number_to_chars (buf, value, 2);
22569 case BFD_RELOC_ARM_TLS_CALL:
22570 case BFD_RELOC_ARM_THM_TLS_CALL:
22571 case BFD_RELOC_ARM_TLS_DESCSEQ:
22572 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22573 case BFD_RELOC_ARM_TLS_GOTDESC:
22574 case BFD_RELOC_ARM_TLS_GD32:
22575 case BFD_RELOC_ARM_TLS_LE32:
22576 case BFD_RELOC_ARM_TLS_IE32:
22577 case BFD_RELOC_ARM_TLS_LDM32:
22578 case BFD_RELOC_ARM_TLS_LDO32:
22579 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22582 case BFD_RELOC_ARM_GOT32:
22583 case BFD_RELOC_ARM_GOTOFF:
22586 case BFD_RELOC_ARM_GOT_PREL:
22587 if (fixP->fx_done || !seg->use_rela_p)
22588 md_number_to_chars (buf, value, 4);
22591 case BFD_RELOC_ARM_TARGET2:
22592 /* TARGET2 is not partial-inplace, so we need to write the
22593 addend here for REL targets, because it won't be written out
22594 during reloc processing later. */
22595 if (fixP->fx_done || !seg->use_rela_p)
22596 md_number_to_chars (buf, fixP->fx_offset, 4);
22600 case BFD_RELOC_RVA:
22602 case BFD_RELOC_ARM_TARGET1:
22603 case BFD_RELOC_ARM_ROSEGREL32:
22604 case BFD_RELOC_ARM_SBREL32:
22605 case BFD_RELOC_32_PCREL:
22607 case BFD_RELOC_32_SECREL:
22609 if (fixP->fx_done || !seg->use_rela_p)
22611 /* For WinCE we only do this for pcrel fixups. */
22612 if (fixP->fx_done || fixP->fx_pcrel)
22614 md_number_to_chars (buf, value, 4);
22618 case BFD_RELOC_ARM_PREL31:
22619 if (fixP->fx_done || !seg->use_rela_p)
22621 newval = md_chars_to_number (buf, 4) & 0x80000000;
22622 if ((value ^ (value >> 1)) & 0x40000000)
22624 as_bad_where (fixP->fx_file, fixP->fx_line,
22625 _("rel31 relocation overflow"));
22627 newval |= value & 0x7fffffff;
22628 md_number_to_chars (buf, newval, 4);
22633 case BFD_RELOC_ARM_CP_OFF_IMM:
22634 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22635 if (value < -1023 || value > 1023 || (value & 3))
22636 as_bad_where (fixP->fx_file, fixP->fx_line,
22637 _("co-processor offset out of range"));
22642 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22643 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22644 newval = md_chars_to_number (buf, INSN_SIZE);
22646 newval = get_thumb32_insn (buf);
22648 newval &= 0xffffff00;
22651 newval &= 0xff7fff00;
22652 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22654 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22655 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22656 md_number_to_chars (buf, newval, INSN_SIZE);
22658 put_thumb32_insn (buf, newval);
22661 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22662 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22663 if (value < -255 || value > 255)
22664 as_bad_where (fixP->fx_file, fixP->fx_line,
22665 _("co-processor offset out of range"));
22667 goto cp_off_common;
22669 case BFD_RELOC_ARM_THUMB_OFFSET:
22670 newval = md_chars_to_number (buf, THUMB_SIZE);
22671 /* Exactly what ranges, and where the offset is inserted depends
22672 on the type of instruction, we can establish this from the
22674 switch (newval >> 12)
22676 case 4: /* PC load. */
22677 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22678 forced to zero for these loads; md_pcrel_from has already
22679 compensated for this. */
22681 as_bad_where (fixP->fx_file, fixP->fx_line,
22682 _("invalid offset, target not word aligned (0x%08lX)"),
22683 (((unsigned long) fixP->fx_frag->fr_address
22684 + (unsigned long) fixP->fx_where) & ~3)
22685 + (unsigned long) value);
22687 if (value & ~0x3fc)
22688 as_bad_where (fixP->fx_file, fixP->fx_line,
22689 _("invalid offset, value too big (0x%08lX)"),
22692 newval |= value >> 2;
22695 case 9: /* SP load/store. */
22696 if (value & ~0x3fc)
22697 as_bad_where (fixP->fx_file, fixP->fx_line,
22698 _("invalid offset, value too big (0x%08lX)"),
22700 newval |= value >> 2;
22703 case 6: /* Word load/store. */
22705 as_bad_where (fixP->fx_file, fixP->fx_line,
22706 _("invalid offset, value too big (0x%08lX)"),
22708 newval |= value << 4; /* 6 - 2. */
22711 case 7: /* Byte load/store. */
22713 as_bad_where (fixP->fx_file, fixP->fx_line,
22714 _("invalid offset, value too big (0x%08lX)"),
22716 newval |= value << 6;
22719 case 8: /* Halfword load/store. */
22721 as_bad_where (fixP->fx_file, fixP->fx_line,
22722 _("invalid offset, value too big (0x%08lX)"),
22724 newval |= value << 5; /* 6 - 1. */
22728 as_bad_where (fixP->fx_file, fixP->fx_line,
22729 "Unable to process relocation for thumb opcode: %lx",
22730 (unsigned long) newval);
22733 md_number_to_chars (buf, newval, THUMB_SIZE);
22736 case BFD_RELOC_ARM_THUMB_ADD:
22737 /* This is a complicated relocation, since we use it for all of
22738 the following immediate relocations:
22742 9bit ADD/SUB SP word-aligned
22743 10bit ADD PC/SP word-aligned
22745 The type of instruction being processed is encoded in the
22752 newval = md_chars_to_number (buf, THUMB_SIZE);
22754 int rd = (newval >> 4) & 0xf;
22755 int rs = newval & 0xf;
22756 int subtract = !!(newval & 0x8000);
22758 /* Check for HI regs, only very restricted cases allowed:
22759 Adjusting SP, and using PC or SP to get an address. */
22760 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
22761 || (rs > 7 && rs != REG_SP && rs != REG_PC))
22762 as_bad_where (fixP->fx_file, fixP->fx_line,
22763 _("invalid Hi register with immediate"));
22765 /* If value is negative, choose the opposite instruction. */
22769 subtract = !subtract;
22771 as_bad_where (fixP->fx_file, fixP->fx_line,
22772 _("immediate value out of range"));
22777 if (value & ~0x1fc)
22778 as_bad_where (fixP->fx_file, fixP->fx_line,
22779 _("invalid immediate for stack address calculation"));
22780 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
22781 newval |= value >> 2;
22783 else if (rs == REG_PC || rs == REG_SP)
22785 if (subtract || value & ~0x3fc)
22786 as_bad_where (fixP->fx_file, fixP->fx_line,
22787 _("invalid immediate for address calculation (value = 0x%08lX)"),
22788 (unsigned long) value);
22789 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
22791 newval |= value >> 2;
22796 as_bad_where (fixP->fx_file, fixP->fx_line,
22797 _("immediate value out of range"));
22798 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
22799 newval |= (rd << 8) | value;
22804 as_bad_where (fixP->fx_file, fixP->fx_line,
22805 _("immediate value out of range"));
22806 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
22807 newval |= rd | (rs << 3) | (value << 6);
22810 md_number_to_chars (buf, newval, THUMB_SIZE);
22813 case BFD_RELOC_ARM_THUMB_IMM:
22814 newval = md_chars_to_number (buf, THUMB_SIZE);
22815 if (value < 0 || value > 255)
22816 as_bad_where (fixP->fx_file, fixP->fx_line,
22817 _("invalid immediate: %ld is out of range"),
22820 md_number_to_chars (buf, newval, THUMB_SIZE);
22823 case BFD_RELOC_ARM_THUMB_SHIFT:
22824 /* 5bit shift value (0..32). LSL cannot take 32. */
22825 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
22826 temp = newval & 0xf800;
22827 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
22828 as_bad_where (fixP->fx_file, fixP->fx_line,
22829 _("invalid shift value: %ld"), (long) value);
22830 /* Shifts of zero must be encoded as LSL. */
22832 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
22833 /* Shifts of 32 are encoded as zero. */
22834 else if (value == 32)
22836 newval |= value << 6;
22837 md_number_to_chars (buf, newval, THUMB_SIZE);
22840 case BFD_RELOC_VTABLE_INHERIT:
22841 case BFD_RELOC_VTABLE_ENTRY:
22845 case BFD_RELOC_ARM_MOVW:
22846 case BFD_RELOC_ARM_MOVT:
22847 case BFD_RELOC_ARM_THUMB_MOVW:
22848 case BFD_RELOC_ARM_THUMB_MOVT:
22849 if (fixP->fx_done || !seg->use_rela_p)
22851 /* REL format relocations are limited to a 16-bit addend. */
22852 if (!fixP->fx_done)
22854 if (value < -0x8000 || value > 0x7fff)
22855 as_bad_where (fixP->fx_file, fixP->fx_line,
22856 _("offset out of range"));
22858 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22859 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22864 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22865 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22867 newval = get_thumb32_insn (buf);
22868 newval &= 0xfbf08f00;
22869 newval |= (value & 0xf000) << 4;
22870 newval |= (value & 0x0800) << 15;
22871 newval |= (value & 0x0700) << 4;
22872 newval |= (value & 0x00ff);
22873 put_thumb32_insn (buf, newval);
22877 newval = md_chars_to_number (buf, 4);
22878 newval &= 0xfff0f000;
22879 newval |= value & 0x0fff;
22880 newval |= (value & 0xf000) << 4;
22881 md_number_to_chars (buf, newval, 4);
22886 case BFD_RELOC_ARM_ALU_PC_G0_NC:
22887 case BFD_RELOC_ARM_ALU_PC_G0:
22888 case BFD_RELOC_ARM_ALU_PC_G1_NC:
22889 case BFD_RELOC_ARM_ALU_PC_G1:
22890 case BFD_RELOC_ARM_ALU_PC_G2:
22891 case BFD_RELOC_ARM_ALU_SB_G0_NC:
22892 case BFD_RELOC_ARM_ALU_SB_G0:
22893 case BFD_RELOC_ARM_ALU_SB_G1_NC:
22894 case BFD_RELOC_ARM_ALU_SB_G1:
22895 case BFD_RELOC_ARM_ALU_SB_G2:
22896 gas_assert (!fixP->fx_done);
22897 if (!seg->use_rela_p)
22900 bfd_vma encoded_addend;
22901 bfd_vma addend_abs = abs (value);
22903 /* Check that the absolute value of the addend can be
22904 expressed as an 8-bit constant plus a rotation. */
22905 encoded_addend = encode_arm_immediate (addend_abs);
22906 if (encoded_addend == (unsigned int) FAIL)
22907 as_bad_where (fixP->fx_file, fixP->fx_line,
22908 _("the offset 0x%08lX is not representable"),
22909 (unsigned long) addend_abs);
22911 /* Extract the instruction. */
22912 insn = md_chars_to_number (buf, INSN_SIZE);
22914 /* If the addend is positive, use an ADD instruction.
22915 Otherwise use a SUB. Take care not to destroy the S bit. */
22916 insn &= 0xff1fffff;
22922 /* Place the encoded addend into the first 12 bits of the
22924 insn &= 0xfffff000;
22925 insn |= encoded_addend;
22927 /* Update the instruction. */
22928 md_number_to_chars (buf, insn, INSN_SIZE);
22932 case BFD_RELOC_ARM_LDR_PC_G0:
22933 case BFD_RELOC_ARM_LDR_PC_G1:
22934 case BFD_RELOC_ARM_LDR_PC_G2:
22935 case BFD_RELOC_ARM_LDR_SB_G0:
22936 case BFD_RELOC_ARM_LDR_SB_G1:
22937 case BFD_RELOC_ARM_LDR_SB_G2:
22938 gas_assert (!fixP->fx_done);
22939 if (!seg->use_rela_p)
22942 bfd_vma addend_abs = abs (value);
22944 /* Check that the absolute value of the addend can be
22945 encoded in 12 bits. */
22946 if (addend_abs >= 0x1000)
22947 as_bad_where (fixP->fx_file, fixP->fx_line,
22948 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
22949 (unsigned long) addend_abs);
22951 /* Extract the instruction. */
22952 insn = md_chars_to_number (buf, INSN_SIZE);
22954 /* If the addend is negative, clear bit 23 of the instruction.
22955 Otherwise set it. */
22957 insn &= ~(1 << 23);
22961 /* Place the absolute value of the addend into the first 12 bits
22962 of the instruction. */
22963 insn &= 0xfffff000;
22964 insn |= addend_abs;
22966 /* Update the instruction. */
22967 md_number_to_chars (buf, insn, INSN_SIZE);
22971 case BFD_RELOC_ARM_LDRS_PC_G0:
22972 case BFD_RELOC_ARM_LDRS_PC_G1:
22973 case BFD_RELOC_ARM_LDRS_PC_G2:
22974 case BFD_RELOC_ARM_LDRS_SB_G0:
22975 case BFD_RELOC_ARM_LDRS_SB_G1:
22976 case BFD_RELOC_ARM_LDRS_SB_G2:
22977 gas_assert (!fixP->fx_done);
22978 if (!seg->use_rela_p)
22981 bfd_vma addend_abs = abs (value);
22983 /* Check that the absolute value of the addend can be
22984 encoded in 8 bits. */
22985 if (addend_abs >= 0x100)
22986 as_bad_where (fixP->fx_file, fixP->fx_line,
22987 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
22988 (unsigned long) addend_abs);
22990 /* Extract the instruction. */
22991 insn = md_chars_to_number (buf, INSN_SIZE);
22993 /* If the addend is negative, clear bit 23 of the instruction.
22994 Otherwise set it. */
22996 insn &= ~(1 << 23);
23000 /* Place the first four bits of the absolute value of the addend
23001 into the first 4 bits of the instruction, and the remaining
23002 four into bits 8 .. 11. */
23003 insn &= 0xfffff0f0;
23004 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23006 /* Update the instruction. */
23007 md_number_to_chars (buf, insn, INSN_SIZE);
23011 case BFD_RELOC_ARM_LDC_PC_G0:
23012 case BFD_RELOC_ARM_LDC_PC_G1:
23013 case BFD_RELOC_ARM_LDC_PC_G2:
23014 case BFD_RELOC_ARM_LDC_SB_G0:
23015 case BFD_RELOC_ARM_LDC_SB_G1:
23016 case BFD_RELOC_ARM_LDC_SB_G2:
23017 gas_assert (!fixP->fx_done);
23018 if (!seg->use_rela_p)
23021 bfd_vma addend_abs = abs (value);
23023 /* Check that the absolute value of the addend is a multiple of
23024 four and, when divided by four, fits in 8 bits. */
23025 if (addend_abs & 0x3)
23026 as_bad_where (fixP->fx_file, fixP->fx_line,
23027 _("bad offset 0x%08lX (must be word-aligned)"),
23028 (unsigned long) addend_abs);
23030 if ((addend_abs >> 2) > 0xff)
23031 as_bad_where (fixP->fx_file, fixP->fx_line,
23032 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23033 (unsigned long) addend_abs);
23035 /* Extract the instruction. */
23036 insn = md_chars_to_number (buf, INSN_SIZE);
23038 /* If the addend is negative, clear bit 23 of the instruction.
23039 Otherwise set it. */
23041 insn &= ~(1 << 23);
23045 /* Place the addend (divided by four) into the first eight
23046 bits of the instruction. */
23047 insn &= 0xfffffff0;
23048 insn |= addend_abs >> 2;
23050 /* Update the instruction. */
23051 md_number_to_chars (buf, insn, INSN_SIZE);
23055 case BFD_RELOC_ARM_V4BX:
23056 /* This will need to go in the object file. */
23060 case BFD_RELOC_UNUSED:
23062 as_bad_where (fixP->fx_file, fixP->fx_line,
23063 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
/* Convert the GAS fixup FIXP (located in SECTION) into a freshly
   xmalloc'd BFD arelent for the object writer.  Internal fixups that
   should already have been applied, and codes the output format cannot
   express, are reported via as_bad_where.  */
23067 /* Translate internal representation of relocation info to BFD target
23071 tc_gen_reloc (asection *section, fixS *fixp)
23074 bfd_reloc_code_real_type code;
/* Allocate the arelent and its symbol slot; the reloc address is the
   fixup's offset within its frag's section.  */
23076 reloc = (arelent *) xmalloc (sizeof (arelent));
23078 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
23079 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
23080 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
/* PC-relative fixups: RELA targets fold the PC bias into the addend;
   REL targets record the reloc address in fx_offset instead.  */
23082 if (fixp->fx_pcrel)
23084 if (section->use_rela_p)
23085 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
23087 fixp->fx_offset = reloc->address;
23089 reloc->addend = fixp->fx_offset;
/* Map the internal fixup type to a BFD reloc code, choosing the
   PC-relative variant where one exists.  */
23091 switch (fixp->fx_r_type)
23094 if (fixp->fx_pcrel)
23096 code = BFD_RELOC_8_PCREL;
23101 if (fixp->fx_pcrel)
23103 code = BFD_RELOC_16_PCREL;
23108 if (fixp->fx_pcrel)
23110 code = BFD_RELOC_32_PCREL;
23114 case BFD_RELOC_ARM_MOVW:
23115 if (fixp->fx_pcrel)
23117 code = BFD_RELOC_ARM_MOVW_PCREL;
23121 case BFD_RELOC_ARM_MOVT:
23122 if (fixp->fx_pcrel)
23124 code = BFD_RELOC_ARM_MOVT_PCREL;
23128 case BFD_RELOC_ARM_THUMB_MOVW:
23129 if (fixp->fx_pcrel)
23131 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
23135 case BFD_RELOC_ARM_THUMB_MOVT:
23136 if (fixp->fx_pcrel)
23138 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
/* These codes pass straight through to BFD unchanged.  */
23142 case BFD_RELOC_NONE:
23143 case BFD_RELOC_ARM_PCREL_BRANCH:
23144 case BFD_RELOC_ARM_PCREL_BLX:
23145 case BFD_RELOC_RVA:
23146 case BFD_RELOC_THUMB_PCREL_BRANCH7:
23147 case BFD_RELOC_THUMB_PCREL_BRANCH9:
23148 case BFD_RELOC_THUMB_PCREL_BRANCH12:
23149 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23150 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23151 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23152 case BFD_RELOC_VTABLE_ENTRY:
23153 case BFD_RELOC_VTABLE_INHERIT:
23155 case BFD_RELOC_32_SECREL:
23157 code = fixp->fx_r_type;
/* On EABI v4+ a surviving Thumb BLX is emitted as BRANCH23; the linker
   handles the ARM/Thumb state change.  */
23160 case BFD_RELOC_THUMB_PCREL_BLX:
23162 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23163 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
23166 code = BFD_RELOC_THUMB_PCREL_BLX;
23169 case BFD_RELOC_ARM_LITERAL:
23170 case BFD_RELOC_ARM_HWLITERAL:
23171 /* If this is called then a literal has
23172 been referenced across a section boundary. */
23173 as_bad_where (fixp->fx_file, fixp->fx_line,
23174 _("literal referenced across section boundary"));
23178 case BFD_RELOC_ARM_TLS_CALL:
23179 case BFD_RELOC_ARM_THM_TLS_CALL:
23180 case BFD_RELOC_ARM_TLS_DESCSEQ:
23181 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23182 case BFD_RELOC_ARM_GOT32:
23183 case BFD_RELOC_ARM_GOTOFF:
23184 case BFD_RELOC_ARM_GOT_PREL:
23185 case BFD_RELOC_ARM_PLT32:
23186 case BFD_RELOC_ARM_TARGET1:
23187 case BFD_RELOC_ARM_ROSEGREL32:
23188 case BFD_RELOC_ARM_SBREL32:
23189 case BFD_RELOC_ARM_PREL31:
23190 case BFD_RELOC_ARM_TARGET2:
23191 case BFD_RELOC_ARM_TLS_LE32:
23192 case BFD_RELOC_ARM_TLS_LDO32:
23193 case BFD_RELOC_ARM_PCREL_CALL:
23194 case BFD_RELOC_ARM_PCREL_JUMP:
23195 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23196 case BFD_RELOC_ARM_ALU_PC_G0:
23197 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23198 case BFD_RELOC_ARM_ALU_PC_G1:
23199 case BFD_RELOC_ARM_ALU_PC_G2:
23200 case BFD_RELOC_ARM_LDR_PC_G0:
23201 case BFD_RELOC_ARM_LDR_PC_G1:
23202 case BFD_RELOC_ARM_LDR_PC_G2:
23203 case BFD_RELOC_ARM_LDRS_PC_G0:
23204 case BFD_RELOC_ARM_LDRS_PC_G1:
23205 case BFD_RELOC_ARM_LDRS_PC_G2:
23206 case BFD_RELOC_ARM_LDC_PC_G0:
23207 case BFD_RELOC_ARM_LDC_PC_G1:
23208 case BFD_RELOC_ARM_LDC_PC_G2:
23209 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23210 case BFD_RELOC_ARM_ALU_SB_G0:
23211 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23212 case BFD_RELOC_ARM_ALU_SB_G1:
23213 case BFD_RELOC_ARM_ALU_SB_G2:
23214 case BFD_RELOC_ARM_LDR_SB_G0:
23215 case BFD_RELOC_ARM_LDR_SB_G1:
23216 case BFD_RELOC_ARM_LDR_SB_G2:
23217 case BFD_RELOC_ARM_LDRS_SB_G0:
23218 case BFD_RELOC_ARM_LDRS_SB_G1:
23219 case BFD_RELOC_ARM_LDRS_SB_G2:
23220 case BFD_RELOC_ARM_LDC_SB_G0:
23221 case BFD_RELOC_ARM_LDC_SB_G1:
23222 case BFD_RELOC_ARM_LDC_SB_G2:
23223 case BFD_RELOC_ARM_V4BX:
23224 code = fixp->fx_r_type;
23227 case BFD_RELOC_ARM_TLS_GOTDESC:
23228 case BFD_RELOC_ARM_TLS_GD32:
23229 case BFD_RELOC_ARM_TLS_IE32:
23230 case BFD_RELOC_ARM_TLS_LDM32:
23231 /* BFD will include the symbol's address in the addend.
23232 But we don't want that, so subtract it out again here. */
23233 if (!S_IS_COMMON (fixp->fx_addsy))
23234 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
23235 code = fixp->fx_r_type;
/* The following are internal-only fixups; reaching this point means
   md_apply_fix could not resolve them, which is a user-visible error.  */
23239 case BFD_RELOC_ARM_IMMEDIATE:
23240 as_bad_where (fixp->fx_file, fixp->fx_line,
23241 _("internal relocation (type: IMMEDIATE) not fixed up"));
23244 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23245 as_bad_where (fixp->fx_file, fixp->fx_line,
23246 _("ADRL used for a symbol not defined in the same file"));
23249 case BFD_RELOC_ARM_OFFSET_IMM:
23250 if (section->use_rela_p)
23252 code = fixp->fx_r_type;
23256 if (fixp->fx_addsy != NULL
23257 && !S_IS_DEFINED (fixp->fx_addsy)
23258 && S_IS_LOCAL (fixp->fx_addsy))
23260 as_bad_where (fixp->fx_file, fixp->fx_line,
23261 _("undefined local label `%s'"),
23262 S_GET_NAME (fixp->fx_addsy));
23266 as_bad_where (fixp->fx_file, fixp->fx_line,
23267 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
/* Anything else cannot be represented; name the offending type in the
   diagnostic.  */
23274 switch (fixp->fx_r_type)
23276 case BFD_RELOC_NONE: type = "NONE"; break;
23277 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
23278 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
23279 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
23280 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
23281 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
23282 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
23283 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
23284 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
23285 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
23286 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
23287 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
23288 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
23289 default: type = _("<unknown>"); break;
23291 as_bad_where (fixp->fx_file, fixp->fx_line,
23292 _("cannot represent %s relocation in this object file format"),
/* A 32-bit (or 32-bit pc-relative) reference to the GOT symbol itself
   becomes a GOTPC reloc with the reloc address as its addend.  */
23299 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
23301 && fixp->fx_addsy == GOT_symbol)
23303 code = BFD_RELOC_ARM_GOTPC;
23304 reloc->addend = fixp->fx_offset = reloc->address;
/* Look up the howto for the chosen code; NULL means the target BFD
   cannot express it.  */
23308 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
23310 if (reloc->howto == NULL)
23312 as_bad_where (fixp->fx_file, fixp->fx_line,
23313 _("cannot represent %s relocation in this object file format"),
23314 bfd_get_reloc_code_name (code));
23318 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23319 vtable entry to be used in the relocation's section offset. */
23320 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23321 reloc->address = fixp->fx_offset;
23326 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
/* Create a fixup for a data directive (.byte/.short/.word/...).  When no
   reloc type was supplied by the caller, one is chosen purely from the
   directive's SIZE in bytes.  */
23329 cons_fix_new_arm (fragS * frag,
23333 bfd_reloc_code_real_type reloc)
23338 FIXME: @@ Should look at CPU word size. */
23342 reloc = BFD_RELOC_8;
23345 reloc = BFD_RELOC_16;
23349 reloc = BFD_RELOC_32;
23352 reloc = BFD_RELOC_64;
/* An O_secrel expression is lowered to a plain symbol reference with a
   32-bit section-relative reloc (COFF SECREL; presumably PE-only --
   TODO confirm against the surrounding #ifdefs).  */
23357 if (exp->X_op == O_secrel)
23359 exp->X_op = O_symbol;
23360 reloc = BFD_RELOC_32_SECREL;
23364 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23367 #if defined (OBJ_COFF)
/* COFF interworking hook: retarget a Thumb BL (BRANCH23) aimed at a
   defined non-Thumb function so it refers to that function's special
   Thumb entry point instead.  */
23369 arm_validate_fix (fixS * fixP)
23371 /* If the destination of the branch is a defined symbol which does not have
23372 the THUMB_FUNC attribute, then we must be calling a function which has
23373 the (interfacearm) attribute. We look for the Thumb entry point to that
23374 function and change the branch to refer to that function instead. */
23375 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23376 && fixP->fx_addsy != NULL
23377 && S_IS_DEFINED (fixP->fx_addsy)
23378 && ! THUMB_IS_FUNC (fixP->fx_addsy))
/* find_real_start resolves the Thumb entry-point symbol for the target.  */
23380 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
/* Decide whether the fixup FIXP must be kept as a relocation for the
   linker instead of being resolved by the assembler.  Nonzero forces
   the relocation to be emitted.  */
23387 arm_force_relocation (struct fix * fixp)
23389 #if defined (OBJ_COFF) && defined (TE_PE)
23390 if (fixp->fx_r_type == BFD_RELOC_RVA)
23394 /* In case we have a call or a branch to a function in ARM ISA mode from
23395 a thumb function or vice-versa force the relocation. These relocations
23396 are cleared off for some cores that might have blx and simple transformations
23400 switch (fixp->fx_r_type)
/* ARM-mode branch/call relocs whose target is a Thumb function: keep
   the reloc so the linker can arrange interworking.  */
23402 case BFD_RELOC_ARM_PCREL_JUMP:
23403 case BFD_RELOC_ARM_PCREL_CALL:
23404 case BFD_RELOC_THUMB_PCREL_BLX:
23405 if (THUMB_IS_FUNC (fixp->fx_addsy))
/* ...and conversely for branches whose target is an ARM function.  */
23409 case BFD_RELOC_ARM_PCREL_BLX:
23410 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23411 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23412 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23413 if (ARM_IS_FUNC (fixp->fx_addsy))
23422 /* Resolve these relocations even if the symbol is extern or weak.
23423 Technically this is probably wrong due to symbol preemption.
23424 In practice these relocations do not have enough range to be useful
23425 at dynamic link time, and some code (e.g. in the Linux kernel)
23426 expects these references to be resolved. */
23427 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23428 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23429 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23430 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23431 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23432 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23433 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23434 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23435 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23436 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23437 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23438 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
23439 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
23440 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
23443 /* Always leave these relocations for the linker. */
23444 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23445 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23446 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23449 /* Always generate relocations against function symbols. */
23450 if (fixp->fx_r_type == BFD_RELOC_32
23452 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
/* Otherwise defer to the target-independent policy.  */
23455 return generic_force_reloc (fixp);
23458 #if defined (OBJ_ELF) || defined (OBJ_COFF)
23459 /* Relocations against function names must be left unadjusted,
23460    so that the linker can use this information to generate interworking
23461    stubs.  The MIPS version of this function
23462    also prevents relocations that are mips-16 specific, but I do not
23463    know why it does this.
23466    There is one other problem that ought to be addressed here, but
23467    which currently is not: Taking the address of a label (rather
23468    than a function) and then later jumping to that address.  Such
23469    addresses also ought to have their bottom bit set (assuming that
23470    they reside in Thumb code), but at the moment they will not.  */
/* Return whether the symbol in FIXP may be replaced with a section
   symbol plus offset.  Each "keep the symbol" case below falls through
   to a return whose line is not visible in this view.  */
23473 arm_fix_adjustable (fixS * fixP)
/* No symbol at all: nothing to preserve.  */
23475   if (fixP->fx_addsy == NULL)
23478   /* Preserve relocations against symbols with function type.  */
23479   if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
/* Thumb functions must keep their symbol so the linker can set the
   low (Thumb) bit / build interworking stubs.  */
23482   if (THUMB_IS_FUNC (fixP->fx_addsy)
23483       && fixP->fx_subsy == NULL)
23486   /* We need the symbol name for the VTABLE entries.  */
23487   if (	fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
23488       || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23491   /* Don't allow symbols to be discarded on GOT related relocs.  */
23492   if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
23493       || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
23494       || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
23495       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
23496       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
23497       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
23498       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
23499       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
23500       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
23501       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
23502       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
23503       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
23504       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
23505       || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
23508   /* Similarly for group relocations.  */
23509   if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23510        && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23511       || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23514   /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
23515   if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
23516       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23517       || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
23518       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
23519       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23520       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
23521       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
23522       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
23527 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
/* Return the BFD target vector name to use for the output file,
   selected by build environment (Symbian / VxWorks / NaCl / generic)
   and by the runtime endianness setting.  */
23532 elf32_arm_target_format (void)
23535   return (target_big_endian
23536 	  ? "elf32-bigarm-symbian"
23537 	  : "elf32-littlearm-symbian");
23538 #elif defined (TE_VXWORKS)
23539   return (target_big_endian
23540 	  ? "elf32-bigarm-vxworks"
23541 	  : "elf32-littlearm-vxworks");
23542 #elif defined (TE_NACL)
23543   return (target_big_endian
23544 	  ? "elf32-bigarm-nacl"
23545 	  : "elf32-littlearm-nacl");
/* Default: plain ARM ELF, big- or little-endian.  */
23547   if (target_big_endian)
23548     return "elf32-bigarm";
23550     return "elf32-littlearm";
/* Per-symbol hook for ELF output: delegate to the generic ELF
   symbol frobber.  */
23555 armelf_frob_symbol (symbolS * symp,
23558   elf_frob_symbol (symp, puntp);
23562 /* MD interface: Finalization.	 */
/* End-of-assembly cleanup (function header elided in this view --
   presumably arm_cleanup): verify all IT blocks were closed, then
   dump every outstanding literal pool at the end of its section.  */
23567   literal_pool * pool;
23569   /* Ensure that all the IT blocks are properly closed.  */
23570   check_it_blocks_finished ();
23572   for (pool = list_of_pools; pool; pool = pool->next)
23574       /* Put it at the end of the relevant section.  */
23575       subseg_set (pool->section, pool->sub_section);
/* NOTE(review): mapping-state bookkeeping for the new subsection;
   only meaningful for ELF output.  */
23577       arm_elf_change_section ();
23584 /* Remove any excess mapping symbols generated for alignment frags in
23585    SEC.  We may have created a mapping symbol before a zero byte
23586    alignment; remove it if there's a mapping symbol after the
/* Called via bfd_map_over_sections; walks SEC's frag chain and deletes
   redundant $a/$t/$d mapping symbols sitting at frag boundaries.  */
23589 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
23590 		       void *dummy ATTRIBUTE_UNUSED)
23592   segment_info_type *seginfo = seg_info (sec);
/* Section with no frags: nothing to scan.  */
23595   if (seginfo == NULL || seginfo->frchainP == NULL)
23598   for (fragp = seginfo->frchainP->frch_root;
23600        fragp = fragp->fr_next)
23602       symbolS *sym = fragp->tc_frag_data.last_map;
23603       fragS *next = fragp->fr_next;
23605       /* Variable-sized frags have been converted to fixed size by
23606 	 this point.  But if this was variable-sized to start with,
23607 	 there will be a fixed-size frag after it.  So don't handle
23609       if (sym == NULL || next == NULL)
23612       if (S_GET_VALUE (sym) < next->fr_address)
23613 	/* Not at the end of this frag.  */
23615       know (S_GET_VALUE (sym) == next->fr_address);
23619 	  if (next->tc_frag_data.first_map != NULL)
23621 	      /* Next frag starts with a mapping symbol.  Discard this
23623 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23627 	  if (next->fr_next == NULL)
23629 	      /* This mapping symbol is at the end of the section.  Discard
23631 	      know (next->fr_fix == 0 && next->fr_var == 0);
23632 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23636 	  /* As long as we have empty frags without any mapping symbols,
23638 	  /* If the next frag is non-empty and does not start with a
23639 	     mapping symbol, then this mapping symbol is required.  */
23640 	  if (next->fr_address != next->fr_next->fr_address)
/* Skip over zero-length frags until a decision can be made.  */
23643 	  next = next->fr_next;
23645       while (next != NULL);
23650 /* Adjust the symbol table.  This marks Thumb symbols as distinct from
/* For COFF output: rewrite storage classes of Thumb symbols and mark
   interworking symbols.  For ELF output: set st_target_internal /
   STT_ARM_16BIT on Thumb symbols, then prune mapping symbols and run
   the generic ELF adjustments.  (The two halves are separated by
   preprocessor conditionals that fall between the visible lines.)  */
23654 arm_adjust_symtab (void)
23659   for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
23661       if (ARM_IS_THUMB (sym))
23663 	  if (THUMB_IS_FUNC (sym))
23665 	      /* Mark the symbol as a Thumb function.  */
23666 	      if (	 S_GET_STORAGE_CLASS (sym) == C_STAT
23667 		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
23668 		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
23670 	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
23671 		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
/* Any other storage class on a Thumb function is unexpected.  */
23673 		as_bad (_("%s: unexpected function type: %d"),
23674 			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
/* Non-function Thumb symbols: map to the Thumb variant of their class.  */
23676 	  else switch (S_GET_STORAGE_CLASS (sym))
23679 	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
23682 	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
23685 	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
23693       if (ARM_IS_INTERWORK (sym))
23694 	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
/* ELF half: tag each Thumb symbol (mapping symbols excepted).  */
23701   for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
23703       if (ARM_IS_THUMB (sym))
23705 	  elf_symbol_type * elf_sym;
23707 	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
23708 	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
23710 	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
23711 						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
23713 	      /* If it's a .thumb_func, declare it as so,
23714 		 otherwise tag label as .code 16.  */
23715 	      if (THUMB_IS_FUNC (sym))
23716 		elf_sym->internal_elf_sym.st_target_internal
23717 		  = ST_BRANCH_TO_THUMB;
23718 	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
23719 		elf_sym->internal_elf_sym.st_info =
23720 		  ELF_ST_INFO (bind, STT_ARM_16BIT);
23725   /* Remove any overlapping mapping symbols generated by alignment frags.  */
23726   bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
23727   /* Now do generic ELF adjustments.  */
23728   elf_adjust_symtab ();
23732 /* MD interface: Initialization.  */
/* Pre-parse the table of floating-point constant strings fp_const[]
   into the binary fp_values[] forms via atof_ieee; the error path for
   a failed conversion falls outside the visible lines.  */
23735 set_constant_flonums (void)
23739   for (i = 0; i < NUM_FLOAT_VALS; i++)
23740     if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
23744 /* Auto-select Thumb mode if it's the only available instruction set for the
23745    given architecture.  */
/* If the selected CPU lacks the ARM (v1) instruction set entirely
   (e.g. M-profile cores), switch the assembler into Thumb mode.  */
23748 autoselect_thumb_from_cpu_variant (void)
23750   if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
23751     opcode_select (16);
/* Target-specific one-time initialization (the md_begin header line is
   elided in this view).  Builds the opcode/operand lookup hash tables,
   reconciles command-line CPU/FPU options, records object-file flags,
   and sets the BFD machine number.  */
/* Build hash tables for every mnemonic/operand namespace the parser
   consults; bail out if any allocation fails.  */
23760   if (	 (arm_ops_hsh = hash_new ()) == NULL
23761       || (arm_cond_hsh = hash_new ()) == NULL
23762       || (arm_shift_hsh = hash_new ()) == NULL
23763       || (arm_psr_hsh = hash_new ()) == NULL
23764       || (arm_v7m_psr_hsh = hash_new ()) == NULL
23765       || (arm_reg_hsh = hash_new ()) == NULL
23766       || (arm_reloc_hsh = hash_new ()) == NULL
23767       || (arm_barrier_opt_hsh = hash_new ()) == NULL)
23768     as_fatal (_("virtual memory exhausted"));
23770   for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
23771     hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
23772   for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
23773     hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
23774   for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
23775     hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
23776   for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
23777     hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
23778   for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
23779     hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
23780 		 (void *) (v7m_psrs + i));
23781   for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
23782     hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
23784        i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
23786     hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
23787 		 (void *) (barrier_opt_names + i));
23789   for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
23791       struct reloc_entry * entry = reloc_names + i;
23793       if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
23794 	/* This makes encode_branch() use the EABI versions of this relocation.  */
23795 	entry->reloc = BFD_RELOC_UNUSED;
23797       hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
23801   set_constant_flonums ();
23803   /* Set the cpu variant based on the command-line options.  We prefer
23804      -mcpu= over -march= if both are set (as for GCC); and we prefer
23805      -mfpu= over any other way of setting the floating point unit.
23806      Use of legacy options with new options is faulted.  */
23809       if (mcpu_cpu_opt || march_cpu_opt)
23810 	as_bad (_("use of old and new-style options to set CPU type"));
23812       mcpu_cpu_opt = legacy_cpu;
23814   else if (!mcpu_cpu_opt)
23815     mcpu_cpu_opt = march_cpu_opt;
/* Same precedence logic for the FPU selection.  */
23820 	as_bad (_("use of old and new-style options to set FPU type"));
23822       mfpu_opt = legacy_fpu;
23824   else if (!mfpu_opt)
23826 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
23827       || defined (TE_NetBSD) || defined (TE_VXWORKS))
23828       /* Some environments specify a default FPU.  If they don't, infer it
23829 	 from the processor.  */
23831 	mfpu_opt = mcpu_fpu_opt;
23833 	mfpu_opt = march_fpu_opt;
23835 	mfpu_opt = &fpu_default;
23841       if (mcpu_cpu_opt != NULL)
23842 	mfpu_opt = &fpu_default;
23843       else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
23844 	mfpu_opt = &fpu_arch_vfp_v2;
23846 	mfpu_opt = &fpu_arch_fpa;
/* No CPU chosen anywhere: fall back to the configured default.  */
23852 	  mcpu_cpu_opt = &cpu_default;
23853 	  selected_cpu = cpu_default;
23857     selected_cpu = *mcpu_cpu_opt;
23859     mcpu_cpu_opt = &arm_arch_any;
/* Final feature set = selected CPU features plus selected FPU features.  */
23862   ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23864   autoselect_thumb_from_cpu_variant ();
23866   arm_arch_used = thumb_arch_used = arm_arch_none;
23868 #if defined OBJ_COFF || defined OBJ_ELF
23870     unsigned int flags = 0;
23872 #if defined OBJ_ELF
23873     flags = meabi_flags;
23875     switch (meabi_flags)
23877       case EF_ARM_EABI_UNKNOWN:
23879 	/* Set the flags in the private structure.  */
23880 	if (uses_apcs_26)      flags |= F_APCS26;
23881 	if (support_interwork) flags |= F_INTERWORK;
23882 	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
23883 	if (pic_code)	       flags |= F_PIC;
23884 	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
23885 	  flags |= F_SOFT_FLOAT;
23887 	switch (mfloat_abi_opt)
23889 	  case ARM_FLOAT_ABI_SOFT:
23890 	  case ARM_FLOAT_ABI_SOFTFP:
23891 	    flags |= F_SOFT_FLOAT;
23894 	  case ARM_FLOAT_ABI_HARD:
23895 	    if (flags & F_SOFT_FLOAT)
23896 	      as_bad (_("hard-float conflicts with specified fpu"));
23900 	/* Using pure-endian doubles (even if soft-float).	*/
23901 	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
23902 	  flags |= F_VFP_FLOAT;
23904 #if defined OBJ_ELF
23905 	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
23906 	    flags |= EF_ARM_MAVERICK_FLOAT;
23909       case EF_ARM_EABI_VER4:
23910       case EF_ARM_EABI_VER5:
23911 	/* No additional flags to set.	*/
23918     bfd_set_private_flags (stdoutput, flags);
23920     /* We have run out of flags in the COFF header to encode the
23921        status of ATPCS support, so instead we create a dummy,
23922        empty, debug section called .arm.atpcs.	*/
23927 	sec = bfd_make_section (stdoutput, ".arm.atpcs");
23931 	    bfd_set_section_flags
23932 	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
23933 	    bfd_set_section_size (stdoutput, sec, 0);
23934 	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
23940   /* Record the CPU type as well.  */
/* Pick the most specific BFD machine number the feature set supports,
   testing from newest/most capable down to oldest.  */
23941   if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
23942     mach = bfd_mach_arm_iWMMXt2;
23943   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
23944     mach = bfd_mach_arm_iWMMXt;
23945   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
23946     mach = bfd_mach_arm_XScale;
23947   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
23948     mach = bfd_mach_arm_ep9312;
23949   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
23950     mach = bfd_mach_arm_5TE;
23951   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
23953       if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
23954 	mach = bfd_mach_arm_5T;
23956 	mach = bfd_mach_arm_5;
23958   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
23960       if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
23961 	mach = bfd_mach_arm_4T;
23963 	mach = bfd_mach_arm_4;
23965   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
23966     mach = bfd_mach_arm_3M;
23967   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
23968     mach = bfd_mach_arm_3;
23969   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
23970     mach = bfd_mach_arm_2a;
23971   else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
23972     mach = bfd_mach_arm_2;
23974     mach = bfd_mach_arm_unknown;
23976   bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
23979 /* Command line processing. */
23982 Invocation line includes a switch not recognized by the base assembler.
23983 See if it's a processor-specific option.
23985 This routine is somewhat complicated by the need for backwards
23986 compatibility (since older releases of gcc can't be changed).
23987 The new options try to make the interface as compatible as
23990 New options (supported) are:
23992 -mcpu=<cpu name> Assemble for selected processor
23993 -march=<architecture name> Assemble for selected architecture
23994 -mfpu=<fpu architecture> Assemble for selected FPU.
23995 -EB/-mbig-endian Big-endian
23996 -EL/-mlittle-endian Little-endian
23997 -k Generate PIC code
23998 -mthumb Start in Thumb mode
23999 -mthumb-interwork Code supports ARM/Thumb interworking
24001 -m[no-]warn-deprecated Warn about deprecated features
24003 For now we will also provide support for:
24005 -mapcs-32 32-bit Program counter
24006 -mapcs-26 26-bit Program counter
24007       -mapcs-float		  Floats passed in FP registers
24008 -mapcs-reentrant Reentrant code
24010 (sometime these will probably be replaced with -mapcs=<list of options>
24011 and -matpcs=<list of options>)
24013   The remaining options are only supported for backwards compatibility.
24014 Cpu variants, the arm part is optional:
24015 -m[arm]1 Currently not supported.
24016 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24017 -m[arm]3 Arm 3 processor
24018 -m[arm]6[xx], Arm 6 processors
24019 -m[arm]7[xx][t][[d]m] Arm 7 processors
24020 -m[arm]8[10] Arm 8 processors
24021 -m[arm]9[20][tdmi] Arm 9 processors
24022 -mstrongarm[110[0]] StrongARM processors
24023 -mxscale XScale processors
24024 -m[arm]v[2345[t[e]]] Arm architectures
24025 -mall All (except the ARM1)
24027 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24028 -mfpe-old (No float load/store multiples)
24029 -mvfpxd VFP Single precision
24031 -mno-fpu Disable all floating point instructions
24033 The following CPU names are recognized:
24034 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24035 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24036 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24037 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24038 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24039 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24040 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Short options understood by the driver: -m<arg> and -k.  */
24044 const char * md_shortopts = "m:k";
/* -EB/-EL are only both available on bi-endian targets; otherwise the
   one matching the configured default byte order is defined.  */
24046 #ifdef ARM_BI_ENDIAN
24047 #define OPTION_EB (OPTION_MD_BASE + 0)
24048 #define OPTION_EL (OPTION_MD_BASE + 1)
24050 #if TARGET_BYTES_BIG_ENDIAN
24051 #define OPTION_EB (OPTION_MD_BASE + 0)
24053 #define OPTION_EL (OPTION_MD_BASE + 1)
24056 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
/* Long options table handed to getopt_long by the generic driver.  */
24058 struct option md_longopts[] =
24061   {"EB", no_argument, NULL, OPTION_EB},
24064   {"EL", no_argument, NULL, OPTION_EL},
24066   {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
24067   {NULL, no_argument, NULL, 0}
24070 size_t md_longopts_size = sizeof (md_longopts);
/* Describes a simple boolean/flag command-line option: matching the
   option name stores VALUE into *VAR.  */
24072 struct arm_option_table
24074   char *option;		/* Option name to match.  */
24075   char *help;		/* Help information.  */
24076   int  *var;		/* Variable to change.	*/
24077   int	value;		/* What to change it to.  */
24078   char *deprecated;	/* If non-null, print this message.  */
/* Table of simple flag options (see struct arm_option_table above).  */
24081 struct arm_option_table arm_opts[] =
24083   {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
24084   {"mthumb", N_("assemble Thumb code"),   &thumb_mode,	 1, NULL},
24085   {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24086    &support_interwork, 1, NULL},
24087   {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
24088   {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
24089   {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
24091   {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
24092   {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
24093   {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
24094   {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
24097   /* These are recognized by the assembler, but have no effect on code.	 */
24098   {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
24099   {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
24101   {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
24102   {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24103    &warn_on_deprecated, 0, NULL},
24104   {NULL, NULL, NULL, 0, NULL}
/* Describes a deprecated CPU/FPU option: matching the option name
   points *VAR at VALUE and prints the DEPRECATED advice message.  */
24107 struct arm_legacy_option_table
24109   char *option;				/* Option name to match.  */
24110   const arm_feature_set	**var;		/* Variable to change.	*/
24111   const arm_feature_set	value;		/* What to change it to.  */
24112   char *deprecated;			/* If non-null, print this message.  */
/* Deprecated -m<cpu>/-m<arch>/-m<fpu> spellings, each mapped to a
   feature set plus the modern replacement to suggest.  */
24115 const struct arm_legacy_option_table arm_legacy_opts[] =
24117   /* DON'T add any new processors to this list -- we want the whole list
24118      to go away...  Add them to the processors table instead.  */
24119   {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
24120   {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
24121   {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
24122   {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
24123   {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
24124   {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
24125   {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
24126   {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
24127   {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
24128   {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
24129   {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
24130   {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
24131   {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
24132   {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
24133   {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
24134   {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
24135   {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
24136   {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
24137   {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
24138   {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
24139   {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
24140   {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
24141   {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
24142   {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
24143   {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
24144   {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
24145   {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
24146   {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
24147   {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
24148   {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
24149   {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
24150   {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
24151   {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
24152   {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
24153   {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
24154   {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
24155   {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
24156   {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
24157   {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
24158   {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
24159   {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
24160   {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
24161   {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
24162   {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
24163   {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
24164   {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
24165   {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24166   {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24167   {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24168   {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24169   {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
24170   {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
24171   {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
24172   {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
24173   {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
24174   {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
24175   {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
24176   {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
24177   {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
24178   {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
24179   {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
24180   {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
24181   {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
24182   {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
24183   {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
24184   {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
24185   {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
24186   {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
24187   {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
24188   {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
24189    N_("use -mcpu=strongarm110")},
24190   {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
24191    N_("use -mcpu=strongarm1100")},
24192   {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
24193    N_("use -mcpu=strongarm1110")},
24194   {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
24195   {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
24196   {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},
24198   /* Architecture variants -- don't add any more to this list either.	*/
24199   {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
24200   {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
24201   {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
24202   {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
24203   {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
24204   {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
24205   {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
24206   {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
24207   {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
24208   {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
24209   {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
24210   {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
24211   {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
24212   {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
24213   {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
24214   {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
24215   {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
24216   {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
24218   /* Floating point variants -- don't add any more to this list either.	 */
24219   {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
24220   {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
24221   {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
24222   {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
24223    N_("use either -mfpu=softfpa or -mfpu=softvfp")},
24225   {NULL, NULL, ARM_ARCH_NONE, NULL}
/* Describes one -mcpu= candidate: its feature set, the FPU assumed
   when the user does not give -mfpu=, and an optional canonical name
   for the build attributes.  */
24228 struct arm_cpu_option_table
24232   const arm_feature_set	value;
24233   /* For some CPUs we assume an FPU unless the user explicitly sets
24235   const arm_feature_set	default_fpu;
24236   /* The canonical name of the CPU, or NULL to use NAME converted to upper
24238   const char *			canonical_name;
24241 /* This list should, at a minimum, contain all the cpu names
24242    recognized by GCC.  */
/* Helper that also records strlen (N) at compile time via sizeof.  */
24243 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
24244 static const struct arm_cpu_option_table arm_cpus[] =
24246 ARM_CPU_OPT ("all", ARM_ANY, FPU_ARCH_FPA, NULL),
24247 ARM_CPU_OPT ("arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL),
24248 ARM_CPU_OPT ("arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL),
24249 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
24250 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
24251 ARM_CPU_OPT ("arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24252 ARM_CPU_OPT ("arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24253 ARM_CPU_OPT ("arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24254 ARM_CPU_OPT ("arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24255 ARM_CPU_OPT ("arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24256 ARM_CPU_OPT ("arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24257 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24258 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24259 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24260 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24261 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24262 ARM_CPU_OPT ("arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24263 ARM_CPU_OPT ("arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24264 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24265 ARM_CPU_OPT ("arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24266 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24267 ARM_CPU_OPT ("arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24268 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24269 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24270 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24271 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24272 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24273 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24274 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24275 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24276 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24277 ARM_CPU_OPT ("arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24278 ARM_CPU_OPT ("arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24279 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24280 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24281 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24282 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24283 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24284 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24285 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"),
24286 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24287 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24288 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24289 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24290 ARM_CPU_OPT ("fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24291 ARM_CPU_OPT ("fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24292 /* For V5 or later processors we default to using VFP; but the user
24293 should really set the FPU type explicitly. */
24294 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24295 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24296 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24297 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24298 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
24299 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24300 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"),
24301 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24302 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24303 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"),
24304 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24305 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24306 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24307 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24308 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24309 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"),
24310 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24311 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24312 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24313 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2,
24315 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
24316 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24317 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24318 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24319 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24320 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24321 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"),
24322 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL),
24323 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2,
24325 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL),
24326 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, "MPCore"),
24327 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, "MPCore"),
24328 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL),
24329 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL),
24330 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL),
24331 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL),
24332 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
24333 FPU_NONE, "Cortex-A5"),
24334 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24336 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
24337 ARM_FEATURE (0, FPU_VFP_V3
24338 | FPU_NEON_EXT_V1),
24340 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC,
24341 ARM_FEATURE (0, FPU_VFP_V3
24342 | FPU_NEON_EXT_V1),
24344 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24346 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24348 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24350 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24352 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
24353 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
24355 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
24356 FPU_NONE, "Cortex-R5"),
24357 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV,
24358 FPU_ARCH_VFP_V3D16,
24360 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
24361 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
24362 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
24363 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"),
24364 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"),
24365 /* ??? XSCALE is really an architecture. */
24366 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24367 /* ??? iwmmxt is not a processor. */
24368 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
24369 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
24370 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24372 ARM_CPU_OPT ("ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
24373 FPU_ARCH_MAVERICK, "ARM920T"),
24374 /* Marvell processors. */
24375 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP | ARM_EXT_SEC, 0),
24376 FPU_ARCH_VFP_V3D16, NULL),
24378 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
/* Describes one -march= option: architecture name mapped to its feature
   set plus the FPU assumed when none is given explicitly.
   NOTE(review): this dump appears to have dropped the struct's opening
   brace and its name/name_len members -- confirm against the full file.  */
24382 struct arm_arch_option_table
24386   const arm_feature_set value;
24387   const arm_feature_set default_fpu;
24390 /* This list should, at a minimum, contain all the architecture names
24391    recognized by GCC. */
/* ARM_ARCH_OPT precomputes the option-name length (sizeof (N) - 1) so the
   option matcher can reject candidates by length before calling strncmp.  */
24392 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
/* Table of -march= architecture names, terminated by a NULL entry.
   The first "all" entry is skipped by the .arch directive handler.  */
24393 static const struct arm_arch_option_table arm_archs[] =
24395   ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
24396   ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
24397   ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
24398   ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
24399   ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
24400   ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
24401   ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
24402   ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
24403   ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
24404   ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
24405   ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
/* From v5 onwards the default FPU is a VFP variant rather than FPA.  */
24406   ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
24407   ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
24408   ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
24409   ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
24410   ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
24411   ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
24412   ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
24413   ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
24414   ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
24415   ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
24416   ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP),
24417   ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
24418   ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
24419   ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
24420   ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
24421   ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
24422   ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
24423   ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
24424   /* The official spelling of the ARMv7 profile variants is the dashed form.
24425      Accept the non-dashed form for compatibility with old toolchains. */
24426   ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
24427   ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
24428   ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
24429   ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
24430   ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
24431   ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
24432   ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
24433   ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
24434   ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
24435   ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
24436   ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
24437   ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
24438   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24440 #undef ARM_ARCH_OPT
24442 /* ISA extensions in the co-processor and main instruction set space. */
/* Describes one "+ext" suffix: its feature bits and the architectures the
   extension may legally be applied to.
   NOTE(review): opening brace and name/name_len members appear dropped in
   this dump -- verify against the full file.  */
24443 struct arm_option_extension_value_table
24447   const arm_feature_set value;
24448   const arm_feature_set allowed_archs;
/* Extension table for -mcpu=/-march= "+ext" suffixes and the
   .arch_extension directive.  arm_parse_extension relies on the
   alphabetical ordering to diagnose out-of-order extension lists.  */
24451 /* The following table must be in alphabetical order with a NULL last entry.
24453 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
24454 static const struct arm_option_extension_value_table arm_extensions[] =
24456   ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE (ARM_EXT_V8, 0)),
24457   ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24458 				   ARM_FEATURE (ARM_EXT_V8, 0)),
24459   ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8,
24460 				   ARM_FEATURE (ARM_EXT_V8, 0)),
24461   ARM_EXT_OPT ("idiv",	ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
24462 				   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24463   ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT),	ARM_ANY),
24464   ARM_EXT_OPT ("iwmmxt2",
24465 			ARM_FEATURE (0, ARM_CEXT_IWMMXT2),	ARM_ANY),
24466   ARM_EXT_OPT ("maverick",
24467 			ARM_FEATURE (0, ARM_CEXT_MAVERICK),	ARM_ANY),
24468   ARM_EXT_OPT ("mp",	ARM_FEATURE (ARM_EXT_MP, 0),
24469 				   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24470   ARM_EXT_OPT ("simd",   FPU_ARCH_NEON_VFP_ARMV8,
24471 				   ARM_FEATURE (ARM_EXT_V8, 0)),
24472   ARM_EXT_OPT ("os",	ARM_FEATURE (ARM_EXT_OS, 0),
24473 				   ARM_FEATURE (ARM_EXT_V6M, 0)),
24474   ARM_EXT_OPT ("sec",	ARM_FEATURE (ARM_EXT_SEC, 0),
24475 				   ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
24476   ARM_EXT_OPT ("virt",	ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
24478 				   ARM_FEATURE (ARM_EXT_V7A, 0)),
24479   ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE),	ARM_ANY),
24480   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24484 /* ISA floating-point and Advanced SIMD extensions. */
/* Maps an -mfpu= name to its feature set.
   NOTE(review): opening brace and name member appear dropped in this
   dump -- verify against the full file.  */
24485 struct arm_option_fpu_value_table
24488   const arm_feature_set value;
24491 /* This list should, at a minimum, contain all the fpu names
24492    recognized by GCC. */
/* Table of -mfpu= / .fpu names, NULL-terminated.  Matched with streq,
   so names must be exact (no prefix matching).  */
24493 static const struct arm_option_fpu_value_table arm_fpus[] =
24495   {"softfpa",		FPU_NONE},
24496   {"fpe",		FPU_ARCH_FPE},
24497   {"fpe2",		FPU_ARCH_FPE},
24498   {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM. */
24499   {"fpa",		FPU_ARCH_FPA},
24500   {"fpa10",		FPU_ARCH_FPA},
24501   {"fpa11",		FPU_ARCH_FPA},
24502   {"arm7500fe",		FPU_ARCH_FPA},
24503   {"softvfp",		FPU_ARCH_VFP},
24504   {"softvfp+vfp",	FPU_ARCH_VFP_V2},
24505   {"vfp",		FPU_ARCH_VFP_V2},
24506   {"vfp9",		FPU_ARCH_VFP_V2},
24507   {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
24508   {"vfp10",		FPU_ARCH_VFP_V2},
24509   {"vfp10-r0",		FPU_ARCH_VFP_V1},
24510   {"vfpxd",		FPU_ARCH_VFP_V1xD},
24511   {"vfpv2",		FPU_ARCH_VFP_V2},
24512   {"vfpv3",		FPU_ARCH_VFP_V3},
24513   {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
24514   {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
24515   {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
24516   {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
24517   {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
24518   {"arm1020t",		FPU_ARCH_VFP_V1},
24519   {"arm1020e",		FPU_ARCH_VFP_V2},
24520   {"arm1136jfs",	FPU_ARCH_VFP_V2},
24521   {"arm1136jf-s",	FPU_ARCH_VFP_V2},
24522   {"maverick",		FPU_ARCH_MAVERICK},
24523   {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
24524   {"neon-fp16",		FPU_ARCH_NEON_FP16},
24525   {"vfpv4",		FPU_ARCH_VFP_V4},
24526   {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
24527   {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
24528   {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
24529   {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
24530   {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
24531   {"crypto-neon-fp-armv8",
24532 			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
24533   {NULL,		ARM_ARCH_NONE}
/* Generic name -> integer-value option pair.
   NOTE(review): the struct's brace and members are missing from this
   dump -- verify against the full file.  */
24536 struct arm_option_value_table
/* Names accepted by -mfloat-abi=, NULL-terminated in the full source.  */
24542 static const struct arm_option_value_table arm_float_abis[] =
24544   {"hard",	ARM_FLOAT_ABI_HARD},
24545   {"softfp",	ARM_FLOAT_ABI_SOFTFP},
24546   {"soft",	ARM_FLOAT_ABI_SOFT},
24551 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
/* Names accepted by -meabi=; values become the EF_ARM_EABI_* ELF flag.  */
24552 static const struct arm_option_value_table arm_eabis[] =
24554   {"gnu",	EF_ARM_EABI_UNKNOWN},
24555   {"4",		EF_ARM_EABI_VER4},
24556   {"5",		EF_ARM_EABI_VER5},
/* A long option ("-mfoo=...") matched by prefix; FUNC receives the text
   after the '=' and returns nonzero on success.  */
24561 struct arm_long_option_table
24563   char * option;		/* Substring to match.  */
24564   char * help;			/* Help information.  */
24565   int (* func) (char * subopt);	/* Function to decode sub-option.  */
24566   char * deprecated;		/* If non-null, print this message.  */
/* Parse STR, a '+'-separated list of architectural extensions (optionally
   prefixed "no" to remove one), against the arm_extensions table and
   update *OPT_P to point at a new feature set with those extensions
   merged in or cleared.
   NOTE(review): this dump is missing several lines of the function
   (return type, error-path returns, loop tail); the xmalloc'd EXT_SET
   looks as if it is leaked on the error paths -- confirm against the
   full source before acting on that.  */
24570 arm_parse_extension (char *str, const arm_feature_set **opt_p)
24572   arm_feature_set *ext_set = (arm_feature_set *)
24573       xmalloc (sizeof (arm_feature_set));
24575   /* We insist on extensions being specified in alphabetical order, and with
24576      extensions being added before being removed.  We achieve this by having
24577      the global ARM_EXTENSIONS table in alphabetical order, and using the
24578      ADDING_VALUE variable to indicate whether we are adding an extension (1)
24579      or removing it (0) and only allowing it to change in the order
24581   const struct arm_option_extension_value_table * opt = NULL;
24582   int adding_value = -1;
24584   /* Copy the feature set, so that we can modify it.  */
24585   *ext_set = **opt_p;
24588   while (str != NULL && *str != 0)
24595 	  as_bad (_("invalid architectural extension"));
24600       ext = strchr (str, '+');
24605 	len = strlen (str);
24607       if (len >= 2 && strncmp (str, "no", 2) == 0)
24609 	  if (adding_value != 0)
24612 	      opt = arm_extensions;
24620 	  if (adding_value == -1)
24623 	      opt = arm_extensions;
24625 	  else if (adding_value != 1)
24627 	      as_bad (_("must specify extensions to add before specifying "
24628 			"those to remove"));
24635 	  as_bad (_("missing architectural extension"));
24639       gas_assert (adding_value != -1);
24640       gas_assert (opt != NULL);
24642       /* Scan over the options table trying to find an exact match.  */
24643       for (; opt->name != NULL; opt++)
24644 	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24646 	    /* Check we can apply the extension to this architecture.  */
24647 	    if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
24649 		as_bad (_("extension does not apply to the base architecture"));
24653 	    /* Add or remove the extension.  */
24655 	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
24657 	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
24662       if (opt->name == NULL)
24664 	  /* Did we fail to find an extension because it wasn't specified in
24665 	     alphabetical order, or because it does not exist?  */
24667 	  for (opt = arm_extensions; opt->name != NULL; opt++)
24668 	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24671 	  if (opt->name == NULL)
24672 	    as_bad (_("unknown architectural extension `%s'"), str);
24674 	    as_bad (_("architectural extensions must be specified in "
24675 		      "alphabetical order"));
24681 	  /* We should skip the extension we've just matched the next time
/* Handle -mcpu=NAME[+ext...]: look NAME up in arm_cpus, set the global
   mcpu_cpu_opt/mcpu_fpu_opt and selected_cpu_name, then hand any '+'
   extension suffix to arm_parse_extension.  Returns nonzero on success.
   NOTE(review): some lines are missing from this dump (return type,
   braces, return statements).  */
24693 arm_parse_cpu (char *str)
24695   const struct arm_cpu_option_table *opt;
24696   char *ext = strchr (str, '+');
24702     len = strlen (str);
24706       as_bad (_("missing cpu name `%s'"), str);
24710   for (opt = arm_cpus; opt->name != NULL; opt++)
24711     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24713 	mcpu_cpu_opt = &opt->value;
24714 	mcpu_fpu_opt = &opt->default_fpu;
24715 	if (opt->canonical_name)
24716 	  strcpy (selected_cpu_name, opt->canonical_name);
/* No canonical name: report the option name upper-cased.  */
24721 	    for (i = 0; i < len; i++)
24722 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
24723 	    selected_cpu_name[i] = 0;
24727 	  return arm_parse_extension (ext, &mcpu_cpu_opt);
24732   as_bad (_("unknown cpu `%s'"), str);
/* Handle -march=NAME[+ext...]: look NAME up in arm_archs, set
   march_cpu_opt/march_fpu_opt and selected_cpu_name, then hand any '+'
   extension suffix to arm_parse_extension.  Returns nonzero on success.
   NOTE(review): some lines are missing from this dump.  */
24737 arm_parse_arch (char *str)
24739   const struct arm_arch_option_table *opt;
24740   char *ext = strchr (str, '+');
24746     len = strlen (str);
24750       as_bad (_("missing architecture name `%s'"), str);
24754   for (opt = arm_archs; opt->name != NULL; opt++)
24755     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24757 	march_cpu_opt = &opt->value;
24758 	march_fpu_opt = &opt->default_fpu;
24759 	strcpy (selected_cpu_name, opt->name);
24762 	  return arm_parse_extension (ext, &march_cpu_opt);
24767   as_bad (_("unknown architecture `%s'\n"), str);
/* Handle -mfpu=STR: exact-match lookup in arm_fpus; sets mfpu_opt.  */
24772 arm_parse_fpu (char * str)
24774   const struct arm_option_fpu_value_table * opt;
24776   for (opt = arm_fpus; opt->name != NULL; opt++)
24777     if (streq (opt->name, str))
24779 	mfpu_opt = &opt->value;
24783   as_bad (_("unknown floating point format `%s'\n"), str);
/* Handle -mfloat-abi=STR: exact-match lookup in arm_float_abis;
   sets mfloat_abi_opt.  */
24788 arm_parse_float_abi (char * str)
24790   const struct arm_option_value_table * opt;
24792   for (opt = arm_float_abis; opt->name != NULL; opt++)
24793     if (streq (opt->name, str))
24795 	mfloat_abi_opt = opt->value;
24799   as_bad (_("unknown floating point abi `%s'\n"), str);
/* Handle -meabi=STR: exact-match lookup in arm_eabis; sets meabi_flags.  */
24805 arm_parse_eabi (char * str)
24807   const struct arm_option_value_table *opt;
24809   for (opt = arm_eabis; opt->name != NULL; opt++)
24810     if (streq (opt->name, str))
24812 	meabi_flags = opt->value;
24815   as_bad (_("unknown EABI `%s'\n"), str);
/* Handle -mimplicit-it=STR: set implicit_it_mode to one of the four
   recognized modes; diagnose anything else.  */
24821 arm_parse_it_mode (char * str)
24823   bfd_boolean ret = TRUE;
24825   if (streq ("arm", str))
24826     implicit_it_mode = IMPLICIT_IT_MODE_ARM;
24827   else if (streq ("thumb", str))
24828     implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
24829   else if (streq ("always", str))
24830     implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
24831   else if (streq ("never", str))
24832     implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
24835       as_bad (_("unknown implicit IT mode `%s', should be "\
24836 		"arm, thumb, always, or never."), str);
/* Handle -mccs: enable TI CodeComposer Studio syntax compatibility by
   switching the comment character to ';' and disabling the line
   separator.  */
24844 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
24846   codecomposer_syntax = TRUE;
24847   arm_comment_chars[0] = ';';
24848   arm_line_separator_chars[0] = 0;
/* Long ("-m...") options, matched by prefix in md_parse_option.
   Each entry's func parses the text after the option prefix.  */
24852 struct arm_long_option_table arm_long_opts[] =
24854   {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
24855    arm_parse_cpu, NULL},
24856   {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
24857    arm_parse_arch, NULL},
24858   {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
24859    arm_parse_fpu, NULL},
24860   {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
24861    arm_parse_float_abi, NULL},
24863   {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
24864    arm_parse_eabi, NULL},
24866   {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
24867    arm_parse_it_mode, NULL},
24868   {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
24869    arm_ccs_mode, NULL},
24870   {NULL, NULL, 0, NULL}
/* GAS hook: decode one command-line option (short char C plus optional
   ARG).  Falls through three tables in turn: arm_opts (simple flags),
   arm_legacy_opts (deprecated flags), then arm_long_opts (prefix-matched
   "-m..." options with sub-option parsers).
   NOTE(review): the switch header and several returns/braces are missing
   from this dump.  */
24874 md_parse_option (int c, char * arg)
24876   struct arm_option_table *opt;
24877   const struct arm_legacy_option_table *fopt;
24878   struct arm_long_option_table *lopt;
24884       target_big_endian = 1;
24890       target_big_endian = 0;
24894     case OPTION_FIX_V4BX:
24899       /* Listing option.  Just ignore these, we don't support additional
24904       for (opt = arm_opts; opt->option != NULL; opt++)
24906 	  if (c == opt->option[0]
24907 	      && ((arg == NULL && opt->option[1] == 0)
24908 		  || streq (arg, opt->option + 1)))
24910 	      /* If the option is deprecated, tell the user.  */
24911 	      if (warn_on_deprecated && opt->deprecated != NULL)
24912 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
24913 			   arg ? arg : "", _(opt->deprecated));
24915 	      if (opt->var != NULL)
24916 		*opt->var = opt->value;
24922       for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
24924 	  if (c == fopt->option[0]
24925 	      && ((arg == NULL && fopt->option[1] == 0)
24926 		  || streq (arg, fopt->option + 1)))
24928 	      /* If the option is deprecated, tell the user.  */
24929 	      if (warn_on_deprecated && fopt->deprecated != NULL)
24930 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
24931 			   arg ? arg : "", _(fopt->deprecated));
24933 	      if (fopt->var != NULL)
24934 		*fopt->var = &fopt->value;
24940       for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
24942 	  /* These options are expected to have an argument.  */
24943 	  if (c == lopt->option[0]
24945 	      && strncmp (arg, lopt->option + 1,
24946 			  strlen (lopt->option + 1)) == 0)
24948 	      /* If the option is deprecated, tell the user.  */
24949 	      if (warn_on_deprecated && lopt->deprecated != NULL)
24950 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
24951 			   _(lopt->deprecated));
24953 	      /* Call the sub-option parser.  */
24954 	      return lopt->func (arg + strlen (lopt->option) - 1);
/* GAS hook: print the ARM-specific options (short table, then long
   table, then a few hard-coded extras) to FP for --help output.  */
24965 md_show_usage (FILE * fp)
24967   struct arm_option_table *opt;
24968   struct arm_long_option_table *lopt;
24970   fprintf (fp, _(" ARM-specific assembler options:\n"));
24972   for (opt = arm_opts; opt->option != NULL; opt++)
24973     if (opt->help != NULL)
24974       fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
24976   for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
24977     if (lopt->help != NULL)
24978       fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
24982   -EB                     assemble code for a big-endian cpu\n"));
24987   -EL                     assemble code for a little-endian cpu\n"));
24991   --fix-v4bx              Allow BX in ARMv4 code\n"));
/* Pairs a Tag_CPU_arch value with the feature set that architecture
   provides.  NOTE(review): the typedef's opening lines (struct header
   and the integer val member) are missing from this dump.  */
24999   arm_feature_set flags;
25000 } cpu_arch_ver_table;
25002 /* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
25003    least features first.  */
25004 static const cpu_arch_ver_table cpu_arch_ver[] =
25010     {4, ARM_ARCH_V5TE},
25011     {5, ARM_ARCH_V5TEJ},
25015     {11, ARM_ARCH_V6M},
25016     {12, ARM_ARCH_V6SM},
25017     {8, ARM_ARCH_V6T2},
25018     {10, ARM_ARCH_V7VE},
25019     {10, ARM_ARCH_V7R},
25020     {10, ARM_ARCH_V7M},
25021     {14, ARM_ARCH_V8A},
25025 /* Set an attribute if it has not already been set by the user.  */
/* Integer-valued build attribute; an explicit .eabi_attribute from the
   user (recorded in attributes_set_explicitly) always wins.  */
25027 aeabi_set_attribute_int (int tag, int value)
25030       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25031       || !attributes_set_explicitly[tag])
25032     bfd_elf_add_proc_attr_int (stdoutput, tag, value);
/* String-valued counterpart of aeabi_set_attribute_int: emit TAG=VALUE
   unless the user already set it explicitly.  */
25036 aeabi_set_attribute_string (int tag, const char *value)
25039       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25040       || !attributes_set_explicitly[tag])
25041     bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25044 /* Set the public EABI object attributes.  */
/* Compute the union of features actually used (plus any -mcpu/-mfpu
   requested) and emit the standard .ARM.attributes build attributes
   (Tag_CPU_arch, Tag_VFP_arch, Tag_DIV_use, ...).
   NOTE(review): many lines of this function are missing from this dump
   (declarations of arch/profile/virt_sec, several braces and else
   branches); do not reason about control flow from this extract alone.  */
25046 aeabi_set_public_attributes (void)
25051   int fp16_optional = 0;
25052   arm_feature_set flags;
25053   arm_feature_set tmp;
25054   const cpu_arch_ver_table *p;
25056   /* Choose the architecture based on the capabilities of the requested cpu
25057      (if any) and/or the instructions actually used.  */
25058   ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
25059   ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
25060   ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
25062   if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
25063     ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
25065   if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
25066     ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
25068   /* Allow the user to override the reported architecture.  */
25071       ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
25072       ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
25075   /* We need to make sure that the attributes do not identify us as v6S-M
25076      when the only v6S-M feature in use is the Operating System Extensions.  */
25077   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
25078       if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
25079 	ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
/* Walk cpu_arch_ver (sorted least-features-first) stripping matched
   feature bits; the last matching entry determines Tag_CPU_arch.  */
25083   for (p = cpu_arch_ver; p->val; p++)
25085       if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
25088 	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
25092   /* The table lookup above finds the last architecture to contribute
25093      a new feature.  Unfortunately, Tag13 is a subset of the union of
25094      v6T2 and v7-M, so it is never seen as contributing a new feature.
25095      We can not search for the last entry which is entirely used,
25096      because if no CPU is specified we build up only those flags
25097      actually used.  Perhaps we should separate out the specified
25098      and implicit cases.  Avoid taking this path for -march=all by
25099      checking for contradictory v7-A / v7-M features.  */
25101       && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
25102       && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
25103       && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
25106   /* Tag_CPU_name.  */
25107   if (selected_cpu_name[0])
25111       q = selected_cpu_name;
25112       if (strncmp (q, "armv", 4) == 0)
25117 	  for (i = 0; q[i]; i++)
25118 	    q[i] = TOUPPER (q[i]);
25120       aeabi_set_attribute_string (Tag_CPU_name, q);
25123   /* Tag_CPU_arch.  */
25124   aeabi_set_attribute_int (Tag_CPU_arch, arch);
25126   /* Tag_CPU_arch_profile.  */
25127   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
25129   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
25131   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
25136   if (profile != '\0')
25137     aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
25139   /* Tag_ARM_ISA_use.  */
25140   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
25142     aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
25144   /* Tag_THUMB_ISA_use.  */
25145   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
25147     aeabi_set_attribute_int (Tag_THUMB_ISA_use,
25148 			     ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
25150   /* Tag_VFP_arch.  */
25151   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
25152     aeabi_set_attribute_int (Tag_VFP_arch, 7);
25153   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
25154     aeabi_set_attribute_int (Tag_VFP_arch,
25155 			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
25157   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
25160       aeabi_set_attribute_int (Tag_VFP_arch, 3);
25162   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
25164       aeabi_set_attribute_int (Tag_VFP_arch, 4);
25167   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
25168     aeabi_set_attribute_int (Tag_VFP_arch, 2);
25169   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
25170 	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
25171     aeabi_set_attribute_int (Tag_VFP_arch, 1);
25173   /* Tag_ABI_HardFP_use.  */
25174   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
25175       && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
25176     aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
25178   /* Tag_WMMX_arch.  */
25179   if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
25180     aeabi_set_attribute_int (Tag_WMMX_arch, 2);
25181   else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
25182     aeabi_set_attribute_int (Tag_WMMX_arch, 1);
25184   /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
25185   if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
25186     aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
25187   else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
25189       if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
25191 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
25195 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
25200   /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
25201   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
25202     aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
25206      We set Tag_DIV_use to two when integer divide instructions have been used
25207      in ARM state, or when Thumb integer divide instructions have been used,
25208      but we have no architecture profile set, nor have we any ARM instructions.
25210      For ARMv8 we set the tag to 0 as integer divide is implied by the base
25213      For new architectures we will have to check these tests.  */
25214   gas_assert (arch <= TAG_CPU_ARCH_V8);
25215   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
25216     aeabi_set_attribute_int (Tag_DIV_use, 0);
25217   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
25218 	   || (profile == '\0'
25219 	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
25220 	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
25221     aeabi_set_attribute_int (Tag_DIV_use, 2);
25223   /* Tag_MP_extension_use.  */
25224   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
25225     aeabi_set_attribute_int (Tag_MPextension_use, 1);
25227   /* Tag Virtualization_use.  */
25228   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
25230   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
25233     aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
25236 /* Add the default contents for the .ARM.attributes section.  */
/* NOTE(review): the enclosing function's header (presumably arm_md_end)
   was dropped by the extraction; only this guard + call survive.
   Pre-v4 EABI objects carry no attribute section.  */
25240   if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25243   aeabi_set_public_attributes ();
25245 #endif /* OBJ_ELF */
25248 /* Parse a .cpu directive.  */
/* Temporarily NUL-terminate the name on the input line, look it up in
   arm_cpus (skipping the "all" entry), and switch the active cpu_variant.
   The saved character is always restored before returning.  */
25251 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25253   const struct arm_cpu_option_table *opt;
25257   name = input_line_pointer;
25258   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25259     input_line_pointer++;
25260   saved_char = *input_line_pointer;
25261   *input_line_pointer = 0;
25263   /* Skip the first "all" entry.  */
25264   for (opt = arm_cpus + 1; opt->name != NULL; opt++)
25265     if (streq (opt->name, name))
25267 	mcpu_cpu_opt = &opt->value;
25268 	selected_cpu = opt->value;
25269 	if (opt->canonical_name)
25270 	  strcpy (selected_cpu_name, opt->canonical_name);
/* No canonical spelling: upper-case the table name for Tag_CPU_name.  */
25274 	    for (i = 0; opt->name[i]; i++)
25275 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
25277 	    selected_cpu_name[i] = 0;
25279 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25280 	*input_line_pointer = saved_char;
25281 	demand_empty_rest_of_line ();
25284   as_bad (_("unknown cpu `%s'"), name);
25285   *input_line_pointer = saved_char;
25286   ignore_rest_of_line ();
25290 /* Parse a .arch directive.  */
/* Same shape as s_arm_cpu, but matches against arm_archs and always
   uses the table spelling for selected_cpu_name.  */
25293 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25295   const struct arm_arch_option_table *opt;
25299   name = input_line_pointer;
25300   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25301     input_line_pointer++;
25302   saved_char = *input_line_pointer;
25303   *input_line_pointer = 0;
25305   /* Skip the first "all" entry.  */
25306   for (opt = arm_archs + 1; opt->name != NULL; opt++)
25307     if (streq (opt->name, name))
25309 	mcpu_cpu_opt = &opt->value;
25310 	selected_cpu = opt->value;
25311 	strcpy (selected_cpu_name, opt->name);
25312 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25313 	*input_line_pointer = saved_char;
25314 	demand_empty_rest_of_line ();
25318   as_bad (_("unknown architecture `%s'\n"), name);
25319   *input_line_pointer = saved_char;
25320   ignore_rest_of_line ();
25324 /* Parse a .object_arch directive.  */
/* Record an architecture override for the emitted build attributes only;
   unlike .arch this does NOT change what instructions are accepted.  */
25327 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
25329   const struct arm_arch_option_table *opt;
25333   name = input_line_pointer;
25334   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25335     input_line_pointer++;
25336   saved_char = *input_line_pointer;
25337   *input_line_pointer = 0;
25339   /* Skip the first "all" entry.  */
25340   for (opt = arm_archs + 1; opt->name != NULL; opt++)
25341     if (streq (opt->name, name))
25343 	object_arch = &opt->value;
25344 	*input_line_pointer = saved_char;
25345 	demand_empty_rest_of_line ();
25349   as_bad (_("unknown architecture `%s'\n"), name);
25350   *input_line_pointer = saved_char;
25351   ignore_rest_of_line ();
25354 /* Parse a .arch_extension directive.  */
/* Add (or, with a "no" prefix, remove) a single named extension from the
   currently selected CPU's feature set, then rebuild cpu_variant.  */
25357 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
25359   const struct arm_option_extension_value_table *opt;
25362   int adding_value = 1;
25364   name = input_line_pointer;
25365   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25366     input_line_pointer++;
25367   saved_char = *input_line_pointer;
25368   *input_line_pointer = 0;
/* A leading "no" flips us into removal mode (e.g. ".arch_extension nocrc").  */
25370   if (strlen (name) >= 2
25371       && strncmp (name, "no", 2) == 0)
25377   for (opt = arm_extensions; opt->name != NULL; opt++)
25378     if (streq (opt->name, name))
25380 	if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
25382 	    as_bad (_("architectural extension `%s' is not allowed for the "
25383 		      "current base architecture"), name);
25388 	  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
25390 	  ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
25392 	mcpu_cpu_opt = &selected_cpu;
25393 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25394 	*input_line_pointer = saved_char;
25395 	demand_empty_rest_of_line ();
25399   if (opt->name == NULL)
25400     as_bad (_("unknown architecture extension `%s'\n"), name);
25402   *input_line_pointer = saved_char;
25403   ignore_rest_of_line ();
25406 /* Parse a .fpu directive.  */
/* Switch the active FPU by name (arm_fpus table) and rebuild cpu_variant
   from the current CPU + new FPU feature sets.  */
25409 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
25411   const struct arm_option_fpu_value_table *opt;
25415   name = input_line_pointer;
25416   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25417     input_line_pointer++;
25418   saved_char = *input_line_pointer;
25419   *input_line_pointer = 0;
25421   for (opt = arm_fpus; opt->name != NULL; opt++)
25422     if (streq (opt->name, name))
25424 	mfpu_opt = &opt->value;
25425 	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25426 	*input_line_pointer = saved_char;
25427 	demand_empty_rest_of_line ();
25431   as_bad (_("unknown floating point format `%s'\n"), name);
25432   *input_line_pointer = saved_char;
25433   ignore_rest_of_line ();
25436 /* Copy symbol information.  */
/* Propagate the ARM-specific flag word (e.g. Thumb-function marking)
   from SRC to DEST when GAS clones a symbol.  */
25439 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
25441   ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
25445 /* Given a symbolic attribute NAME, return the proper integer value.
25446    Returns -1 if the attribute is not known.  */
/* Linear lookup of an .eabi_attribute tag name (e.g. "Tag_CPU_arch")
   against a stringified table built with the T() macro.  */
25449 arm_convert_symbolic_attribute (const char *name)
25451   static const struct
25456   attribute_table[] =
25458       /* When you modify this table you should
25459 	 also modify the list in doc/c-arm.texi.  */
25460 #define T(tag) {#tag, tag}
25461       T (Tag_CPU_raw_name),
25464       T (Tag_CPU_arch_profile),
25465       T (Tag_ARM_ISA_use),
25466       T (Tag_THUMB_ISA_use),
25470       T (Tag_Advanced_SIMD_arch),
25471       T (Tag_PCS_config),
25472       T (Tag_ABI_PCS_R9_use),
25473       T (Tag_ABI_PCS_RW_data),
25474       T (Tag_ABI_PCS_RO_data),
25475       T (Tag_ABI_PCS_GOT_use),
25476       T (Tag_ABI_PCS_wchar_t),
25477       T (Tag_ABI_FP_rounding),
25478       T (Tag_ABI_FP_denormal),
25479       T (Tag_ABI_FP_exceptions),
25480       T (Tag_ABI_FP_user_exceptions),
25481       T (Tag_ABI_FP_number_model),
25482       T (Tag_ABI_align_needed),
25483       T (Tag_ABI_align8_needed),
25484       T (Tag_ABI_align_preserved),
25485       T (Tag_ABI_align8_preserved),
25486       T (Tag_ABI_enum_size),
25487       T (Tag_ABI_HardFP_use),
25488       T (Tag_ABI_VFP_args),
25489       T (Tag_ABI_WMMX_args),
25490       T (Tag_ABI_optimization_goals),
25491       T (Tag_ABI_FP_optimization_goals),
25492       T (Tag_compatibility),
25493       T (Tag_CPU_unaligned_access),
25494       T (Tag_FP_HP_extension),
25495       T (Tag_VFP_HP_extension),
25496       T (Tag_ABI_FP_16bit_format),
25497       T (Tag_MPextension_use),
25499       T (Tag_nodefaults),
25500       T (Tag_also_compatible_with),
25501       T (Tag_conformance),
25503       T (Tag_Virtualization_use),
25504       /* We deliberately do not include Tag_MPextension_use_legacy.  */
25512   for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
25513     if (streq (name, attribute_table[i].name))
25514       return attribute_table[i].tag;
25520 /* Apply sym value for relocations only in the case that
25521    they are for local symbols and you have the respective
25522    architectural feature for blx and simple switches.  */
/* Decide, per relocation type, whether the symbol value should be
   applied at assembly time (local BLX/branch targets on v5T+).
   NOTE(review): the function tail (returns / closing braces) is
   missing from this dump.  */
25524 arm_apply_sym_value (struct fix * fixP)
25527       && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25528       && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
25530       switch (fixP->fx_r_type)
25532 	case BFD_RELOC_ARM_PCREL_BLX:
25533 	case BFD_RELOC_THUMB_PCREL_BRANCH23:
25534 	  if (ARM_IS_FUNC (fixP->fx_addsy))
25538 	case BFD_RELOC_ARM_PCREL_CALL:
25539 	case BFD_RELOC_THUMB_PCREL_BLX:
25540 	  if (THUMB_IS_FUNC (fixP->fx_addsy))
25551 #endif /* OBJ_ELF */