4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
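/* Usage sketch (illustrative only, not an excerpt from the decoder below):
   a decode path that is only architecturally valid from ARMv6T2 onwards
   would start with

       ARCH(6T2);

   which falls through on a 6T2/v7 core and branches to the illegal_op
   (UNDEF) path on anything older. */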
48 /* internal defines */
49 typedef struct DisasContext {
52 /* Nonzero if this instruction has been conditionally skipped. */
54 /* The label that will be jumped to when the instruction is skipped. */
56 /* Thumb-2 conditional execution bits. */
59 struct TranslationBlock *tb;
60 int singlestep_enabled;
63 #if !defined(CONFIG_USER_ONLY)
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
73 #if defined(CONFIG_USER_ONLY)
76 #define IS_USER(s) (s->user)
79 /* These instructions trap after executing, so defer them until after the
80 conditional execution state has been updated. */
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_exclusive_addr;
89 static TCGv_i32 cpu_exclusive_val;
90 static TCGv_i32 cpu_exclusive_high;
91 #ifdef CONFIG_USER_ONLY
92 static TCGv_i32 cpu_exclusive_test;
93 static TCGv_i32 cpu_exclusive_info;
96 /* FIXME: These should be removed. */
97 static TCGv cpu_F0s, cpu_F1s;
98 static TCGv_i64 cpu_F0d, cpu_F1d;
100 #include "gen-icount.h"
102 static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106 /* Initialize TCG globals. */
107 void arm_translate_init(void)
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUARMState, regs[i]),
118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUARMState, exclusive_val), "exclusive_val");
122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUARMState, exclusive_high), "exclusive_high");
124 #ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUARMState, exclusive_test), "exclusive_test");
127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUARMState, exclusive_info), "exclusive_info");
135 static inline TCGv load_cpu_offset(int offset)
137 TCGv tmp = tcg_temp_new_i32();
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
142 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
144 static inline void store_cpu_offset(TCGv var, int offset)
146 tcg_gen_st_i32(var, cpu_env, offset);
147 tcg_temp_free_i32(var);
150 #define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUARMState, name))
153 /* Set a variable to the value of a CPU register. */
154 static void load_reg_var(DisasContext *s, TCGv var, int reg)
158 /* normally, since we have already updated PC, we need only add one more insn */
160 addr = (long)s->pc + 2;
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
165 tcg_gen_mov_i32(var, cpu_R[reg]);
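/* Worked example for the r15 case above, assuming the usual ARM PC-read
   rule: an ARM instruction at address A reads r15 as A + 8.  By the time
   we get here s->pc already points to A + 4 (the next insn), so adding one
   more insn length (4, or 2 for Thumb, where r15 reads as A + 4) produces
   the architectural value placed into var. */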
169 /* Create a new temporary and set it to the value of a CPU register. */
170 static inline TCGv load_reg(DisasContext *s, int reg)
172 TCGv tmp = tcg_temp_new_i32();
173 load_reg_var(s, tmp, reg);
177 /* Set a CPU register. The source must be a temporary and will be
178    marked as dead. */
179 static void store_reg(DisasContext *s, int reg, TCGv var)
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
185 tcg_gen_mov_i32(cpu_R[reg], var);
186 tcg_temp_free_i32(var);
189 /* Value extensions. */
190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
192 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
195 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
199 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
205 /* Set NZCV flags from the high 4 bits of var. */
206 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
208 static void gen_exception(int excp)
210 TCGv tmp = tcg_temp_new_i32();
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(tmp);
213 tcg_temp_free_i32(tmp);
216 static void gen_smul_dual(TCGv a, TCGv b)
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
223 tcg_temp_free_i32(tmp2);
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
228 tcg_temp_free_i32(tmp1);
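/* Roughly equivalent scalar model of the dual multiply above (a sketch,
   assuming 32-bit inputs a and b):

       int32_t lo = (int16_t)a * (int16_t)b;                  returned in a
       int32_t hi = (int16_t)(a >> 16) * (int16_t)(b >> 16);  returned in b
 */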
231 /* Byteswap each halfword. */
232 static void gen_rev16(TCGv var)
234 TCGv tmp = tcg_temp_new_i32();
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
240 tcg_temp_free_i32(tmp);
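/* For a 32-bit value x the net effect is (reference formula, not generated
   code):  ((x & 0x00ff00ff) << 8) | ((x >> 8) & 0x00ff00ff)
   i.e. the two bytes inside each halfword swap places while the halfwords
   themselves stay put. */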
243 /* Byteswap low halfword and sign extend. */
244 static void gen_revsh(TCGv var)
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
251 /* Unsigned bitfield extract. */
252 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
259 /* Signed bitfield extract. */
260 static void gen_sbfx(TCGv var, int shift, int width)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
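/* The xor/sub pair above is the usual sign-extension trick: for a
   width-bit field v, (v ^ signbit) - signbit equals v with bit width-1
   propagated into the upper bits.  Worked example for width == 4,
   v == 0xb (binary 1011, i.e. -5): (0xb ^ 0x8) - 0x8 == 0x3 - 0x8 == -5. */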
274 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
275 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
277 tcg_gen_andi_i32(val, val, mask);
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
280 tcg_gen_or_i32(dest, base, val);
283 /* Return (b << 32) + a. Mark inputs as dead. */
284 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
288 tcg_gen_extu_i32_i64(tmp64, b);
289 tcg_temp_free_i32(b);
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
293 tcg_temp_free_i64(tmp64);
297 /* Return (b << 32) - a. Mark inputs as dead. */
298 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
302 tcg_gen_extu_i32_i64(tmp64, b);
303 tcg_temp_free_i32(b);
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
307 tcg_temp_free_i64(tmp64);
311 /* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
313 /* 32x32->64 multiply. Marks inputs as dead. */
314 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
319 tcg_gen_extu_i32_i64(tmp1, a);
320 tcg_temp_free_i32(a);
321 tcg_gen_extu_i32_i64(tmp2, b);
322 tcg_temp_free_i32(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 tcg_temp_free_i64(tmp2);
328 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
333 tcg_gen_ext_i32_i64(tmp1, a);
334 tcg_temp_free_i32(a);
335 tcg_gen_ext_i32_i64(tmp2, b);
336 tcg_temp_free_i32(b);
337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
338 tcg_temp_free_i64(tmp2);
342 /* Swap low and high halfwords. */
343 static void gen_swap_half(TCGv var)
345 TCGv tmp = tcg_temp_new_i32();
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
349 tcg_temp_free_i32(tmp);
352 /* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
354 t0 &= ~0x8000;
355 t1 &= ~0x8000;
356 t0 = (t0 + t1) ^ tmp;
359 static void gen_add16(TCGv t0, TCGv t1)
361 TCGv tmp = tcg_temp_new_i32();
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
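/* Why the trick works: clearing bit 15 of both inputs guarantees the
   low-halfword add cannot carry into bit 16, and the final xor folds the
   two original bit-15 values back into the result (together with any carry
   from bits 0..14 that landed in bit 15), so each halfword is added
   modulo 2^16 independently. */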
372 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))
374 /* Set CF to the top bit of var. */
375 static void gen_set_CF_bit31(TCGv var)
377 TCGv tmp = tcg_temp_new_i32();
378 tcg_gen_shri_i32(tmp, var, 31);
380 tcg_temp_free_i32(tmp);
383 /* Set N and Z flags from var. */
384 static inline void gen_logic_CC(TCGv var)
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
391 static void gen_adc(TCGv t0, TCGv t1)
394 tcg_gen_add_i32(t0, t0, t1);
395 tmp = load_cpu_field(CF);
396 tcg_gen_add_i32(t0, t0, tmp);
397 tcg_temp_free_i32(tmp);
400 /* dest = T0 + T1 + CF. */
401 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
407 tcg_temp_free_i32(tmp);
410 /* dest = T0 - T1 + CF - 1. */
411 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
414 tcg_gen_sub_i32(dest, t0, t1);
415 tmp = load_cpu_field(CF);
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
418 tcg_temp_free_i32(tmp);
421 /* FIXME: Implement this natively. */
422 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
424 static void shifter_out_im(TCGv var, int shift)
426 TCGv tmp = tcg_temp_new_i32();
428 tcg_gen_andi_i32(tmp, var, 1);
430 tcg_gen_shri_i32(tmp, var, shift);
432 tcg_gen_andi_i32(tmp, tmp, 1);
435 tcg_temp_free_i32(tmp);
438 /* Shift by immediate. Includes special handling for shift == 0. */
439 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
452 tcg_gen_shri_i32(var, var, 31);
455 tcg_gen_movi_i32(var, 0);
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
466 shifter_out_im(var, shift - 1);
469 tcg_gen_sari_i32(var, var, shift);
471 case 3: /* ROR/RRX */
474 shifter_out_im(var, shift - 1);
475 tcg_gen_rotri_i32(var, var, shift); break;
477 TCGv tmp = load_cpu_field(CF);
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
483 tcg_temp_free_i32(tmp);
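/* Encoding note for the shift == 0 cases handled above: in the ARM
   immediate shift encoding LSL #0 is a plain move, LSR #0 and ASR #0 mean
   a shift by 32, and ROR #0 means RRX (rotate right by one through the
   carry flag), which is why the ROR case falls back to the CF-based
   sequence when shift is zero. */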
488 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
493 case 0: gen_helper_shl_cc(var, var, shift); break;
494 case 1: gen_helper_shr_cc(var, var, shift); break;
495 case 2: gen_helper_sar_cc(var, var, shift); break;
496 case 3: gen_helper_ror_cc(var, var, shift); break;
500 case 0: gen_helper_shl(var, var, shift); break;
501 case 1: gen_helper_shr(var, var, shift); break;
502 case 2: gen_helper_sar(var, var, shift); break;
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
507 tcg_temp_free_i32(shift);
510 #define PAS_OP(pfx) \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
519 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
524 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
526 tmp = tcg_temp_new_ptr();
527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
529 tcg_temp_free_ptr(tmp);
532 tmp = tcg_temp_new_ptr();
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
535 tcg_temp_free_ptr(tmp);
537 #undef gen_pas_helper
538 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
551 #undef gen_pas_helper
556 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
557 #define PAS_OP(pfx) \
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
566 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
571 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
573 tmp = tcg_temp_new_ptr();
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
576 tcg_temp_free_ptr(tmp);
579 tmp = tcg_temp_new_ptr();
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
582 tcg_temp_free_ptr(tmp);
584 #undef gen_pas_helper
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 #undef gen_pas_helper
603 static void gen_test_cc(int cc, int label)
611 tmp = load_cpu_field(ZF);
612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
615 tmp = load_cpu_field(ZF);
616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
619 tmp = load_cpu_field(CF);
620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
623 tmp = load_cpu_field(CF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
627 tmp = load_cpu_field(NF);
628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
631 tmp = load_cpu_field(NF);
632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
635 tmp = load_cpu_field(VF);
636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
639 tmp = load_cpu_field(VF);
640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
646 tcg_temp_free_i32(tmp);
647 tmp = load_cpu_field(ZF);
648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
654 tcg_temp_free_i32(tmp);
655 tmp = load_cpu_field(ZF);
656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
660 tmp2 = load_cpu_field(NF);
661 tcg_gen_xor_i32(tmp, tmp, tmp2);
662 tcg_temp_free_i32(tmp2);
663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
667 tmp2 = load_cpu_field(NF);
668 tcg_gen_xor_i32(tmp, tmp, tmp2);
669 tcg_temp_free_i32(tmp2);
670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
674 tmp = load_cpu_field(ZF);
675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
676 tcg_temp_free_i32(tmp);
677 tmp = load_cpu_field(VF);
678 tmp2 = load_cpu_field(NF);
679 tcg_gen_xor_i32(tmp, tmp, tmp2);
680 tcg_temp_free_i32(tmp2);
681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
684 case 13: /* le: Z || N != V */
685 tmp = load_cpu_field(ZF);
686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
687 tcg_temp_free_i32(tmp);
688 tmp = load_cpu_field(VF);
689 tmp2 = load_cpu_field(NF);
690 tcg_gen_xor_i32(tmp, tmp, tmp2);
691 tcg_temp_free_i32(tmp2);
692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
698 tcg_temp_free_i32(tmp);
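/* Typical use (a sketch): to make a generated block conditional, branch
   over it when the condition fails, e.g.

       label = gen_new_label();
       gen_test_cc(cond ^ 1, label);    take the branch if cond does NOT hold
       ... emit the conditional body ...
       gen_set_label(label);
 */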
701 static const uint8_t table_logic_cc[16] = {
720 /* Set PC and Thumb state from an immediate address. */
721 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
725 s->is_jmp = DISAS_UPDATE;
726 if (s->thumb != (addr & 1)) {
727 tmp = tcg_temp_new_i32();
728 tcg_gen_movi_i32(tmp, addr & 1);
729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
730 tcg_temp_free_i32(tmp);
732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
735 /* Set PC and Thumb state from var. var is marked as dead. */
736 static inline void gen_bx(DisasContext *s, TCGv var)
738 s->is_jmp = DISAS_UPDATE;
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
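/* Interworking sketch: the low bit of the branch target selects the
   instruction set, so BX to 0x8001 enters Thumb state at address 0x8000
   while BX to 0x8000 selects ARM state; the code above strips that bit
   from the new PC and stores it into the thumb field of CPUARMState. */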
744 /* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
747 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
750 if (reg == 15 && ENABLE_ARCH_7) {
753 store_reg(s, reg, var);
757 /* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
761 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
764 if (reg == 15 && ENABLE_ARCH_5) {
767 store_reg(s, reg, var);
771 static inline TCGv gen_ld8s(TCGv addr, int index)
773 TCGv tmp = tcg_temp_new_i32();
774 tcg_gen_qemu_ld8s(tmp, addr, index);
777 static inline TCGv gen_ld8u(TCGv addr, int index)
779 TCGv tmp = tcg_temp_new_i32();
780 tcg_gen_qemu_ld8u(tmp, addr, index);
783 static inline TCGv gen_ld16s(TCGv addr, int index)
785 TCGv tmp = tcg_temp_new_i32();
786 tcg_gen_qemu_ld16s(tmp, addr, index);
789 static inline TCGv gen_ld16u(TCGv addr, int index)
791 TCGv tmp = tcg_temp_new_i32();
792 tcg_gen_qemu_ld16u(tmp, addr, index);
795 static inline TCGv gen_ld32(TCGv addr, int index)
797 TCGv tmp = tcg_temp_new_i32();
798 tcg_gen_qemu_ld32u(tmp, addr, index);
801 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
807 static inline void gen_st8(TCGv val, TCGv addr, int index)
809 tcg_gen_qemu_st8(val, addr, index);
810 tcg_temp_free_i32(val);
812 static inline void gen_st16(TCGv val, TCGv addr, int index)
814 tcg_gen_qemu_st16(val, addr, index);
815 tcg_temp_free_i32(val);
817 static inline void gen_st32(TCGv val, TCGv addr, int index)
819 tcg_gen_qemu_st32(val, addr, index);
820 tcg_temp_free_i32(val);
822 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
828 static inline void gen_set_pc_im(uint32_t val)
830 tcg_gen_movi_i32(cpu_R[15], val);
833 /* Force a TB lookup after an instruction that changes the CPU state. */
834 static inline void gen_lookup_tb(DisasContext *s)
836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
837 s->is_jmp = DISAS_UPDATE;
840 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
843 int val, rm, shift, shiftop;
846 if (!(insn & (1 << 25))) {
849 if (!(insn & (1 << 23)))
852 tcg_gen_addi_i32(var, var, val);
856 shift = (insn >> 7) & 0x1f;
857 shiftop = (insn >> 5) & 3;
858 offset = load_reg(s, rm);
859 gen_arm_shift_im(offset, shiftop, shift, 0);
860 if (!(insn & (1 << 23)))
861 tcg_gen_sub_i32(var, var, offset);
863 tcg_gen_add_i32(var, var, offset);
864 tcg_temp_free_i32(offset);
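/* Worked example (a sketch) for the ARM single data transfer encoding:
   for LDR r0, [r1, #-4] bit 25 is clear, so the 12-bit immediate (4) is
   used and, the U bit (bit 23) being clear, it is subtracted from the
   base; for LDR r0, [r1, r2, LSL #2] bit 25 is set and the shifted-register
   path above applies instead. */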
868 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
874 if (insn & (1 << 22)) {
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
881 tcg_gen_addi_i32(var, var, val);
885 tcg_gen_addi_i32(var, var, extra);
887 offset = load_reg(s, rm);
888 if (!(insn & (1 << 23)))
889 tcg_gen_sub_i32(var, var, offset);
891 tcg_gen_add_i32(var, var, offset);
892 tcg_temp_free_i32(offset);
896 static TCGv_ptr get_fpstatus_ptr(int neon)
898 TCGv_ptr statusptr = tcg_temp_new_ptr();
901 offset = offsetof(CPUARMState, vfp.standard_fp_status);
903 offset = offsetof(CPUARMState, vfp.fp_status);
905 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
909 #define VFP_OP2(name) \
910 static inline void gen_vfp_##name(int dp) \
912 TCGv_ptr fpst = get_fpstatus_ptr(0); \
914 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
916 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
918 tcg_temp_free_ptr(fpst); \
928 static inline void gen_vfp_F1_mul(int dp)
930 /* Like gen_vfp_mul() but put result in F1 */
931 TCGv_ptr fpst = get_fpstatus_ptr(0);
933 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
935 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
937 tcg_temp_free_ptr(fpst);
940 static inline void gen_vfp_F1_neg(int dp)
942 /* Like gen_vfp_neg() but put result in F1 */
944 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
946 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
950 static inline void gen_vfp_abs(int dp)
953 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
955 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
958 static inline void gen_vfp_neg(int dp)
961 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
963 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
966 static inline void gen_vfp_sqrt(int dp)
969 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
971 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
974 static inline void gen_vfp_cmp(int dp)
977 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
979 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
982 static inline void gen_vfp_cmpe(int dp)
985 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
987 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
990 static inline void gen_vfp_F1_ld0(int dp)
993 tcg_gen_movi_i64(cpu_F1d, 0);
995 tcg_gen_movi_i32(cpu_F1s, 0);
998 #define VFP_GEN_ITOF(name) \
999 static inline void gen_vfp_##name(int dp, int neon) \
1001 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1003 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1005 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1007 tcg_temp_free_ptr(statusptr); \
1014 #define VFP_GEN_FTOI(name) \
1015 static inline void gen_vfp_##name(int dp, int neon) \
1017 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1019 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1021 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1023 tcg_temp_free_ptr(statusptr); \
1032 #define VFP_GEN_FIX(name) \
1033 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1035 TCGv tmp_shift = tcg_const_i32(shift); \
1036 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1038 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1040 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1042 tcg_temp_free_i32(tmp_shift); \
1043 tcg_temp_free_ptr(statusptr); \
1055 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1058 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1060 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1063 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1066 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1068 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1072 vfp_reg_offset (int dp, int reg)
1075 return offsetof(CPUARMState, vfp.regs[reg]);
1077 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1078 + offsetof(CPU_DoubleU, l.upper);
1080 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1081 + offsetof(CPU_DoubleU, l.lower);
1085 /* Return the offset of a 32-bit piece of a NEON register.
1086 Zero is the least significant end of the register. */
1088 neon_reg_offset (int reg, int n)
1092 return vfp_reg_offset(0, sreg);
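/* Numbering sketch: the NEON double registers alias the VFP single-precision
   bank, so 32-bit piece n of d<reg> is single register reg * 2 + n (e.g.
   piece 1 of d2 is s5), and vfp_reg_offset() above places odd single
   registers in the l.upper half and even ones in the l.lower half of the
   containing CPU_DoubleU. */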
1095 static TCGv neon_load_reg(int reg, int pass)
1097 TCGv tmp = tcg_temp_new_i32();
1098 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1102 static void neon_store_reg(int reg, int pass, TCGv var)
1104 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1105 tcg_temp_free_i32(var);
1108 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1110 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1113 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1115 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1118 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1119 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1120 #define tcg_gen_st_f32 tcg_gen_st_i32
1121 #define tcg_gen_st_f64 tcg_gen_st_i64
1123 static inline void gen_mov_F0_vreg(int dp, int reg)
1126 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1128 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1131 static inline void gen_mov_F1_vreg(int dp, int reg)
1134 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1136 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1139 static inline void gen_mov_vreg_F0(int dp, int reg)
1142 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1144 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1147 #define ARM_CP_RW_BIT (1 << 20)
1149 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1151 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1154 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1156 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1159 static inline TCGv iwmmxt_load_creg(int reg)
1161 TCGv var = tcg_temp_new_i32();
1162 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1166 static inline void iwmmxt_store_creg(int reg, TCGv var)
1168 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1169 tcg_temp_free_i32(var);
1172 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1174 iwmmxt_store_reg(cpu_M0, rn);
1177 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1179 iwmmxt_load_reg(cpu_M0, rn);
1182 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1184 iwmmxt_load_reg(cpu_V1, rn);
1185 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1188 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1190 iwmmxt_load_reg(cpu_V1, rn);
1191 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1194 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1196 iwmmxt_load_reg(cpu_V1, rn);
1197 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1200 #define IWMMXT_OP(name) \
1201 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1203 iwmmxt_load_reg(cpu_V1, rn); \
1204 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1207 #define IWMMXT_OP_ENV(name) \
1208 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1210 iwmmxt_load_reg(cpu_V1, rn); \
1211 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1214 #define IWMMXT_OP_ENV_SIZE(name) \
1215 IWMMXT_OP_ENV(name##b) \
1216 IWMMXT_OP_ENV(name##w) \
1217 IWMMXT_OP_ENV(name##l)
1219 #define IWMMXT_OP_ENV1(name) \
1220 static inline void gen_op_iwmmxt_##name##_M0(void) \
1222 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1236 IWMMXT_OP_ENV_SIZE(unpackl)
1237 IWMMXT_OP_ENV_SIZE(unpackh)
1239 IWMMXT_OP_ENV1(unpacklub)
1240 IWMMXT_OP_ENV1(unpackluw)
1241 IWMMXT_OP_ENV1(unpacklul)
1242 IWMMXT_OP_ENV1(unpackhub)
1243 IWMMXT_OP_ENV1(unpackhuw)
1244 IWMMXT_OP_ENV1(unpackhul)
1245 IWMMXT_OP_ENV1(unpacklsb)
1246 IWMMXT_OP_ENV1(unpacklsw)
1247 IWMMXT_OP_ENV1(unpacklsl)
1248 IWMMXT_OP_ENV1(unpackhsb)
1249 IWMMXT_OP_ENV1(unpackhsw)
1250 IWMMXT_OP_ENV1(unpackhsl)
1252 IWMMXT_OP_ENV_SIZE(cmpeq)
1253 IWMMXT_OP_ENV_SIZE(cmpgtu)
1254 IWMMXT_OP_ENV_SIZE(cmpgts)
1256 IWMMXT_OP_ENV_SIZE(mins)
1257 IWMMXT_OP_ENV_SIZE(minu)
1258 IWMMXT_OP_ENV_SIZE(maxs)
1259 IWMMXT_OP_ENV_SIZE(maxu)
1261 IWMMXT_OP_ENV_SIZE(subn)
1262 IWMMXT_OP_ENV_SIZE(addn)
1263 IWMMXT_OP_ENV_SIZE(subu)
1264 IWMMXT_OP_ENV_SIZE(addu)
1265 IWMMXT_OP_ENV_SIZE(subs)
1266 IWMMXT_OP_ENV_SIZE(adds)
1268 IWMMXT_OP_ENV(avgb0)
1269 IWMMXT_OP_ENV(avgb1)
1270 IWMMXT_OP_ENV(avgw0)
1271 IWMMXT_OP_ENV(avgw1)
1275 IWMMXT_OP_ENV(packuw)
1276 IWMMXT_OP_ENV(packul)
1277 IWMMXT_OP_ENV(packuq)
1278 IWMMXT_OP_ENV(packsw)
1279 IWMMXT_OP_ENV(packsl)
1280 IWMMXT_OP_ENV(packsq)
1282 static void gen_op_iwmmxt_set_mup(void)
1285 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1286 tcg_gen_ori_i32(tmp, tmp, 2);
1287 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1290 static void gen_op_iwmmxt_set_cup(void)
1293 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1294 tcg_gen_ori_i32(tmp, tmp, 1);
1295 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1298 static void gen_op_iwmmxt_setpsr_nz(void)
1300 TCGv tmp = tcg_temp_new_i32();
1301 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1302 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1305 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1307 iwmmxt_load_reg(cpu_V1, rn);
1308 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1309 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1312 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1318 rd = (insn >> 16) & 0xf;
1319 tmp = load_reg(s, rd);
1321 offset = (insn & 0xff) << ((insn >> 7) & 2);
1322 if (insn & (1 << 24)) {
1324 if (insn & (1 << 23))
1325 tcg_gen_addi_i32(tmp, tmp, offset);
1327 tcg_gen_addi_i32(tmp, tmp, -offset);
1328 tcg_gen_mov_i32(dest, tmp);
1329 if (insn & (1 << 21))
1330 store_reg(s, rd, tmp);
1332 tcg_temp_free_i32(tmp);
1333 } else if (insn & (1 << 21)) {
1335 tcg_gen_mov_i32(dest, tmp);
1336 if (insn & (1 << 23))
1337 tcg_gen_addi_i32(tmp, tmp, offset);
1339 tcg_gen_addi_i32(tmp, tmp, -offset);
1340 store_reg(s, rd, tmp);
1341 } else if (!(insn & (1 << 23)))
1346 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1348 int rd = (insn >> 0) & 0xf;
1351 if (insn & (1 << 8)) {
1352 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1355 tmp = iwmmxt_load_creg(rd);
1358 tmp = tcg_temp_new_i32();
1359 iwmmxt_load_reg(cpu_V0, rd);
1360 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1362 tcg_gen_andi_i32(tmp, tmp, mask);
1363 tcg_gen_mov_i32(dest, tmp);
1364 tcg_temp_free_i32(tmp);
1368 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1369 (i.e. an undefined instruction). */
1370 static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
1373 int rdhi, rdlo, rd0, rd1, i;
1375 TCGv tmp, tmp2, tmp3;
1377 if ((insn & 0x0e000e00) == 0x0c000000) {
1378 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1380 rdlo = (insn >> 12) & 0xf;
1381 rdhi = (insn >> 16) & 0xf;
1382 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1383 iwmmxt_load_reg(cpu_V0, wrd);
1384 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1385 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1386 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1387 } else { /* TMCRR */
1388 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1389 iwmmxt_store_reg(cpu_V0, wrd);
1390 gen_op_iwmmxt_set_mup();
1395 wrd = (insn >> 12) & 0xf;
1396 addr = tcg_temp_new_i32();
1397 if (gen_iwmmxt_address(s, insn, addr)) {
1398 tcg_temp_free_i32(addr);
1401 if (insn & ARM_CP_RW_BIT) {
1402 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1403 tmp = tcg_temp_new_i32();
1404 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1405 iwmmxt_store_creg(wrd, tmp);
1408 if (insn & (1 << 8)) {
1409 if (insn & (1 << 22)) { /* WLDRD */
1410 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1412 } else { /* WLDRW wRd */
1413 tmp = gen_ld32(addr, IS_USER(s));
1416 if (insn & (1 << 22)) { /* WLDRH */
1417 tmp = gen_ld16u(addr, IS_USER(s));
1418 } else { /* WLDRB */
1419 tmp = gen_ld8u(addr, IS_USER(s));
1423 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1424 tcg_temp_free_i32(tmp);
1426 gen_op_iwmmxt_movq_wRn_M0(wrd);
1429 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1430 tmp = iwmmxt_load_creg(wrd);
1431 gen_st32(tmp, addr, IS_USER(s));
1433 gen_op_iwmmxt_movq_M0_wRn(wrd);
1434 tmp = tcg_temp_new_i32();
1435 if (insn & (1 << 8)) {
1436 if (insn & (1 << 22)) { /* WSTRD */
1437 tcg_temp_free_i32(tmp);
1438 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1439 } else { /* WSTRW wRd */
1440 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1441 gen_st32(tmp, addr, IS_USER(s));
1444 if (insn & (1 << 22)) { /* WSTRH */
1445 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1446 gen_st16(tmp, addr, IS_USER(s));
1447 } else { /* WSTRB */
1448 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1449 gen_st8(tmp, addr, IS_USER(s));
1454 tcg_temp_free_i32(addr);
1458 if ((insn & 0x0f000000) != 0x0e000000)
1461 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1462 case 0x000: /* WOR */
1463 wrd = (insn >> 12) & 0xf;
1464 rd0 = (insn >> 0) & 0xf;
1465 rd1 = (insn >> 16) & 0xf;
1466 gen_op_iwmmxt_movq_M0_wRn(rd0);
1467 gen_op_iwmmxt_orq_M0_wRn(rd1);
1468 gen_op_iwmmxt_setpsr_nz();
1469 gen_op_iwmmxt_movq_wRn_M0(wrd);
1470 gen_op_iwmmxt_set_mup();
1471 gen_op_iwmmxt_set_cup();
1473 case 0x011: /* TMCR */
1476 rd = (insn >> 12) & 0xf;
1477 wrd = (insn >> 16) & 0xf;
1479 case ARM_IWMMXT_wCID:
1480 case ARM_IWMMXT_wCASF:
1482 case ARM_IWMMXT_wCon:
1483 gen_op_iwmmxt_set_cup();
1485 case ARM_IWMMXT_wCSSF:
1486 tmp = iwmmxt_load_creg(wrd);
1487 tmp2 = load_reg(s, rd);
1488 tcg_gen_andc_i32(tmp, tmp, tmp2);
1489 tcg_temp_free_i32(tmp2);
1490 iwmmxt_store_creg(wrd, tmp);
1492 case ARM_IWMMXT_wCGR0:
1493 case ARM_IWMMXT_wCGR1:
1494 case ARM_IWMMXT_wCGR2:
1495 case ARM_IWMMXT_wCGR3:
1496 gen_op_iwmmxt_set_cup();
1497 tmp = load_reg(s, rd);
1498 iwmmxt_store_creg(wrd, tmp);
1504 case 0x100: /* WXOR */
1505 wrd = (insn >> 12) & 0xf;
1506 rd0 = (insn >> 0) & 0xf;
1507 rd1 = (insn >> 16) & 0xf;
1508 gen_op_iwmmxt_movq_M0_wRn(rd0);
1509 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1510 gen_op_iwmmxt_setpsr_nz();
1511 gen_op_iwmmxt_movq_wRn_M0(wrd);
1512 gen_op_iwmmxt_set_mup();
1513 gen_op_iwmmxt_set_cup();
1515 case 0x111: /* TMRC */
1518 rd = (insn >> 12) & 0xf;
1519 wrd = (insn >> 16) & 0xf;
1520 tmp = iwmmxt_load_creg(wrd);
1521 store_reg(s, rd, tmp);
1523 case 0x300: /* WANDN */
1524 wrd = (insn >> 12) & 0xf;
1525 rd0 = (insn >> 0) & 0xf;
1526 rd1 = (insn >> 16) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0);
1528 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1529 gen_op_iwmmxt_andq_M0_wRn(rd1);
1530 gen_op_iwmmxt_setpsr_nz();
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1535 case 0x200: /* WAND */
1536 wrd = (insn >> 12) & 0xf;
1537 rd0 = (insn >> 0) & 0xf;
1538 rd1 = (insn >> 16) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0);
1540 gen_op_iwmmxt_andq_M0_wRn(rd1);
1541 gen_op_iwmmxt_setpsr_nz();
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1546 case 0x810: case 0xa10: /* WMADD */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 0) & 0xf;
1549 rd1 = (insn >> 16) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 if (insn & (1 << 21))
1552 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1554 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1555 gen_op_iwmmxt_movq_wRn_M0(wrd);
1556 gen_op_iwmmxt_set_mup();
1558 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1559 wrd = (insn >> 12) & 0xf;
1560 rd0 = (insn >> 16) & 0xf;
1561 rd1 = (insn >> 0) & 0xf;
1562 gen_op_iwmmxt_movq_M0_wRn(rd0);
1563 switch ((insn >> 22) & 3) {
1565 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1568 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1571 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1576 gen_op_iwmmxt_movq_wRn_M0(wrd);
1577 gen_op_iwmmxt_set_mup();
1578 gen_op_iwmmxt_set_cup();
1580 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1581 wrd = (insn >> 12) & 0xf;
1582 rd0 = (insn >> 16) & 0xf;
1583 rd1 = (insn >> 0) & 0xf;
1584 gen_op_iwmmxt_movq_M0_wRn(rd0);
1585 switch ((insn >> 22) & 3) {
1587 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1590 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1593 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 gen_op_iwmmxt_set_cup();
1602 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1603 wrd = (insn >> 12) & 0xf;
1604 rd0 = (insn >> 16) & 0xf;
1605 rd1 = (insn >> 0) & 0xf;
1606 gen_op_iwmmxt_movq_M0_wRn(rd0);
1607 if (insn & (1 << 22))
1608 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1610 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1611 if (!(insn & (1 << 20)))
1612 gen_op_iwmmxt_addl_M0_wRn(wrd);
1613 gen_op_iwmmxt_movq_wRn_M0(wrd);
1614 gen_op_iwmmxt_set_mup();
1616 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1617 wrd = (insn >> 12) & 0xf;
1618 rd0 = (insn >> 16) & 0xf;
1619 rd1 = (insn >> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0);
1621 if (insn & (1 << 21)) {
1622 if (insn & (1 << 20))
1623 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1625 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1627 if (insn & (1 << 20))
1628 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1630 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1632 gen_op_iwmmxt_movq_wRn_M0(wrd);
1633 gen_op_iwmmxt_set_mup();
1635 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 16) & 0xf;
1638 rd1 = (insn >> 0) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
1640 if (insn & (1 << 21))
1641 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1643 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1644 if (!(insn & (1 << 20))) {
1645 iwmmxt_load_reg(cpu_V1, wrd);
1646 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1648 gen_op_iwmmxt_movq_wRn_M0(wrd);
1649 gen_op_iwmmxt_set_mup();
1651 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1652 wrd = (insn >> 12) & 0xf;
1653 rd0 = (insn >> 16) & 0xf;
1654 rd1 = (insn >> 0) & 0xf;
1655 gen_op_iwmmxt_movq_M0_wRn(rd0);
1656 switch ((insn >> 22) & 3) {
1658 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1661 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1664 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 gen_op_iwmmxt_set_cup();
1673 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1674 wrd = (insn >> 12) & 0xf;
1675 rd0 = (insn >> 16) & 0xf;
1676 rd1 = (insn >> 0) & 0xf;
1677 gen_op_iwmmxt_movq_M0_wRn(rd0);
1678 if (insn & (1 << 22)) {
1679 if (insn & (1 << 20))
1680 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1682 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1684 if (insn & (1 << 20))
1685 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1687 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1693 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
1698 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1699 tcg_gen_andi_i32(tmp, tmp, 7);
1700 iwmmxt_load_reg(cpu_V1, rd1);
1701 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1702 tcg_temp_free_i32(tmp);
1703 gen_op_iwmmxt_movq_wRn_M0(wrd);
1704 gen_op_iwmmxt_set_mup();
1706 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1707 if (((insn >> 6) & 3) == 3)
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 tmp = load_reg(s, rd);
1712 gen_op_iwmmxt_movq_M0_wRn(wrd);
1713 switch ((insn >> 6) & 3) {
1715 tmp2 = tcg_const_i32(0xff);
1716 tmp3 = tcg_const_i32((insn & 7) << 3);
1719 tmp2 = tcg_const_i32(0xffff);
1720 tmp3 = tcg_const_i32((insn & 3) << 4);
1723 tmp2 = tcg_const_i32(0xffffffff);
1724 tmp3 = tcg_const_i32((insn & 1) << 5);
1730 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1731 tcg_temp_free(tmp3);
1732 tcg_temp_free(tmp2);
1733 tcg_temp_free_i32(tmp);
1734 gen_op_iwmmxt_movq_wRn_M0(wrd);
1735 gen_op_iwmmxt_set_mup();
1737 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1738 rd = (insn >> 12) & 0xf;
1739 wrd = (insn >> 16) & 0xf;
1740 if (rd == 15 || ((insn >> 22) & 3) == 3)
1742 gen_op_iwmmxt_movq_M0_wRn(wrd);
1743 tmp = tcg_temp_new_i32();
1744 switch ((insn >> 22) & 3) {
1746 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1747 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1749 tcg_gen_ext8s_i32(tmp, tmp);
1751 tcg_gen_andi_i32(tmp, tmp, 0xff);
1755 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1756 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1758 tcg_gen_ext16s_i32(tmp, tmp);
1760 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1764 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1765 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1768 store_reg(s, rd, tmp);
1770 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1771 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1773 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1774 switch ((insn >> 22) & 3) {
1776 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1779 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1782 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1785 tcg_gen_shli_i32(tmp, tmp, 28);
1787 tcg_temp_free_i32(tmp);
1789 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1790 if (((insn >> 6) & 3) == 3)
1792 rd = (insn >> 12) & 0xf;
1793 wrd = (insn >> 16) & 0xf;
1794 tmp = load_reg(s, rd);
1795 switch ((insn >> 6) & 3) {
1797 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1800 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1803 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1806 tcg_temp_free_i32(tmp);
1807 gen_op_iwmmxt_movq_wRn_M0(wrd);
1808 gen_op_iwmmxt_set_mup();
1810 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1811 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1813 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1814 tmp2 = tcg_temp_new_i32();
1815 tcg_gen_mov_i32(tmp2, tmp);
1816 switch ((insn >> 22) & 3) {
1818 for (i = 0; i < 7; i ++) {
1819 tcg_gen_shli_i32(tmp2, tmp2, 4);
1820 tcg_gen_and_i32(tmp, tmp, tmp2);
1824 for (i = 0; i < 3; i ++) {
1825 tcg_gen_shli_i32(tmp2, tmp2, 8);
1826 tcg_gen_and_i32(tmp, tmp, tmp2);
1830 tcg_gen_shli_i32(tmp2, tmp2, 16);
1831 tcg_gen_and_i32(tmp, tmp, tmp2);
1835 tcg_temp_free_i32(tmp2);
1836 tcg_temp_free_i32(tmp);
1838 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1839 wrd = (insn >> 12) & 0xf;
1840 rd0 = (insn >> 16) & 0xf;
1841 gen_op_iwmmxt_movq_M0_wRn(rd0);
1842 switch ((insn >> 22) & 3) {
1844 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1847 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1850 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1855 gen_op_iwmmxt_movq_wRn_M0(wrd);
1856 gen_op_iwmmxt_set_mup();
1858 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1859 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1861 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1862 tmp2 = tcg_temp_new_i32();
1863 tcg_gen_mov_i32(tmp2, tmp);
1864 switch ((insn >> 22) & 3) {
1866 for (i = 0; i < 7; i ++) {
1867 tcg_gen_shli_i32(tmp2, tmp2, 4);
1868 tcg_gen_or_i32(tmp, tmp, tmp2);
1872 for (i = 0; i < 3; i ++) {
1873 tcg_gen_shli_i32(tmp2, tmp2, 8);
1874 tcg_gen_or_i32(tmp, tmp, tmp2);
1878 tcg_gen_shli_i32(tmp2, tmp2, 16);
1879 tcg_gen_or_i32(tmp, tmp, tmp2);
1883 tcg_temp_free_i32(tmp2);
1884 tcg_temp_free_i32(tmp);
1886 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1887 rd = (insn >> 12) & 0xf;
1888 rd0 = (insn >> 16) & 0xf;
1889 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1891 gen_op_iwmmxt_movq_M0_wRn(rd0);
1892 tmp = tcg_temp_new_i32();
1893 switch ((insn >> 22) & 3) {
1895 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1898 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1901 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1904 store_reg(s, rd, tmp);
1906 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1907 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 16) & 0xf;
1910 rd1 = (insn >> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 switch ((insn >> 22) & 3) {
1914 if (insn & (1 << 21))
1915 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1917 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1920 if (insn & (1 << 21))
1921 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1923 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1926 if (insn & (1 << 21))
1927 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1929 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1934 gen_op_iwmmxt_movq_wRn_M0(wrd);
1935 gen_op_iwmmxt_set_mup();
1936 gen_op_iwmmxt_set_cup();
1938 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1939 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 switch ((insn >> 22) & 3) {
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_unpacklsb_M0();
1948 gen_op_iwmmxt_unpacklub_M0();
1951 if (insn & (1 << 21))
1952 gen_op_iwmmxt_unpacklsw_M0();
1954 gen_op_iwmmxt_unpackluw_M0();
1957 if (insn & (1 << 21))
1958 gen_op_iwmmxt_unpacklsl_M0();
1960 gen_op_iwmmxt_unpacklul_M0();
1965 gen_op_iwmmxt_movq_wRn_M0(wrd);
1966 gen_op_iwmmxt_set_mup();
1967 gen_op_iwmmxt_set_cup();
1969 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1970 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1971 wrd = (insn >> 12) & 0xf;
1972 rd0 = (insn >> 16) & 0xf;
1973 gen_op_iwmmxt_movq_M0_wRn(rd0);
1974 switch ((insn >> 22) & 3) {
1976 if (insn & (1 << 21))
1977 gen_op_iwmmxt_unpackhsb_M0();
1979 gen_op_iwmmxt_unpackhub_M0();
1982 if (insn & (1 << 21))
1983 gen_op_iwmmxt_unpackhsw_M0();
1985 gen_op_iwmmxt_unpackhuw_M0();
1988 if (insn & (1 << 21))
1989 gen_op_iwmmxt_unpackhsl_M0();
1991 gen_op_iwmmxt_unpackhul_M0();
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
2000 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2001 case 0x214: case 0x614: case 0xa14: case 0xe14:
2002 if (((insn >> 22) & 3) == 0)
2004 wrd = (insn >> 12) & 0xf;
2005 rd0 = (insn >> 16) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
2007 tmp = tcg_temp_new_i32();
2008 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2009 tcg_temp_free_i32(tmp);
2012 switch ((insn >> 22) & 3) {
2014 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2017 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2020 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2023 tcg_temp_free_i32(tmp);
2024 gen_op_iwmmxt_movq_wRn_M0(wrd);
2025 gen_op_iwmmxt_set_mup();
2026 gen_op_iwmmxt_set_cup();
2028 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2029 case 0x014: case 0x414: case 0x814: case 0xc14:
2030 if (((insn >> 22) & 3) == 0)
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 tmp = tcg_temp_new_i32();
2036 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2037 tcg_temp_free_i32(tmp);
2040 switch ((insn >> 22) & 3) {
2042 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2045 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2048 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2051 tcg_temp_free_i32(tmp);
2052 gen_op_iwmmxt_movq_wRn_M0(wrd);
2053 gen_op_iwmmxt_set_mup();
2054 gen_op_iwmmxt_set_cup();
2056 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2057 case 0x114: case 0x514: case 0x914: case 0xd14:
2058 if (((insn >> 22) & 3) == 0)
2060 wrd = (insn >> 12) & 0xf;
2061 rd0 = (insn >> 16) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
2063 tmp = tcg_temp_new_i32();
2064 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2065 tcg_temp_free_i32(tmp);
2068 switch ((insn >> 22) & 3) {
2070 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2073 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2076 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2079 tcg_temp_free_i32(tmp);
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2084 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2085 case 0x314: case 0x714: case 0xb14: case 0xf14:
2086 if (((insn >> 22) & 3) == 0)
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 tmp = tcg_temp_new_i32();
2092 switch ((insn >> 22) & 3) {
2094 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2095 tcg_temp_free_i32(tmp);
2098 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2101 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2102 tcg_temp_free_i32(tmp);
2105 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2108 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2109 tcg_temp_free_i32(tmp);
2112 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2115 tcg_temp_free_i32(tmp);
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2120 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2121 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2122 wrd = (insn >> 12) & 0xf;
2123 rd0 = (insn >> 16) & 0xf;
2124 rd1 = (insn >> 0) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 switch ((insn >> 22) & 3) {
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2131 gen_op_iwmmxt_minub_M0_wRn(rd1);
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2137 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2140 if (insn & (1 << 21))
2141 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2143 gen_op_iwmmxt_minul_M0_wRn(rd1);
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2151 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2152 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 rd1 = (insn >> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0);
2157 switch ((insn >> 22) & 3) {
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2162 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2165 if (insn & (1 << 21))
2166 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2168 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2171 if (insn & (1 << 21))
2172 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2174 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2182 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2183 case 0x402: case 0x502: case 0x602: case 0x702:
2184 wrd = (insn >> 12) & 0xf;
2185 rd0 = (insn >> 16) & 0xf;
2186 rd1 = (insn >> 0) & 0xf;
2187 gen_op_iwmmxt_movq_M0_wRn(rd0);
2188 tmp = tcg_const_i32((insn >> 20) & 3);
2189 iwmmxt_load_reg(cpu_V1, rd1);
2190 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2195 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2196 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2197 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2198 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2199 wrd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 rd1 = (insn >> 0) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
2203 switch ((insn >> 20) & 0xf) {
2205 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2208 gen_op_iwmmxt_subub_M0_wRn(rd1);
2211 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2214 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2217 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2220 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2223 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2226 gen_op_iwmmxt_subul_M0_wRn(rd1);
2229 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 gen_op_iwmmxt_set_cup();
2238 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2239 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2240 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2241 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
2245 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2246 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2252 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2253 case 0x418: case 0x518: case 0x618: case 0x718:
2254 case 0x818: case 0x918: case 0xa18: case 0xb18:
2255 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
2260 switch ((insn >> 20) & 0xf) {
2262 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2265 gen_op_iwmmxt_addub_M0_wRn(rd1);
2268 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2271 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2274 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2277 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2280 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2283 gen_op_iwmmxt_addul_M0_wRn(rd1);
2286 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2295 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2296 case 0x408: case 0x508: case 0x608: case 0x708:
2297 case 0x808: case 0x908: case 0xa08: case 0xb08:
2298 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2299 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2301 wrd = (insn >> 12) & 0xf;
2302 rd0 = (insn >> 16) & 0xf;
2303 rd1 = (insn >> 0) & 0xf;
2304 gen_op_iwmmxt_movq_M0_wRn(rd0);
2305 switch ((insn >> 22) & 3) {
2307 if (insn & (1 << 21))
2308 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2310 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2313 if (insn & (1 << 21))
2314 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2316 gen_op_iwmmxt_packul_M0_wRn(rd1);
2319 if (insn & (1 << 21))
2320 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2322 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2329 case 0x201: case 0x203: case 0x205: case 0x207:
2330 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2331 case 0x211: case 0x213: case 0x215: case 0x217:
2332 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2333 wrd = (insn >> 5) & 0xf;
2334 rd0 = (insn >> 12) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 if (rd0 == 0xf || rd1 == 0xf)
2338 gen_op_iwmmxt_movq_M0_wRn(wrd);
2339 tmp = load_reg(s, rd0);
2340 tmp2 = load_reg(s, rd1);
2341 switch ((insn >> 16) & 0xf) {
2342 case 0x0: /* TMIA */
2343 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2345 case 0x8: /* TMIAPH */
2346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2348 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2349 if (insn & (1 << 16))
2350 tcg_gen_shri_i32(tmp, tmp, 16);
2351 if (insn & (1 << 17))
2352 tcg_gen_shri_i32(tmp2, tmp2, 16);
2353 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2356 tcg_temp_free_i32(tmp2);
2357 tcg_temp_free_i32(tmp);
2360 tcg_temp_free_i32(tmp2);
2361 tcg_temp_free_i32(tmp);
2362 gen_op_iwmmxt_movq_wRn_M0(wrd);
2363 gen_op_iwmmxt_set_mup();
2372 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2373 (i.e. an undefined instruction). */
2374 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2376 int acc, rd0, rd1, rdhi, rdlo;
2379 if ((insn & 0x0ff00f10) == 0x0e200010) {
2380 /* Multiply with Internal Accumulate Format */
2381 rd0 = (insn >> 12) & 0xf;
2383 acc = (insn >> 5) & 7;
2388 tmp = load_reg(s, rd0);
2389 tmp2 = load_reg(s, rd1);
2390 switch ((insn >> 16) & 0xf) {
2392 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2394 case 0x8: /* MIAPH */
2395 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2397 case 0xc: /* MIABB */
2398 case 0xd: /* MIABT */
2399 case 0xe: /* MIATB */
2400 case 0xf: /* MIATT */
2401 if (insn & (1 << 16))
2402 tcg_gen_shri_i32(tmp, tmp, 16);
2403 if (insn & (1 << 17))
2404 tcg_gen_shri_i32(tmp2, tmp2, 16);
2405 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2410 tcg_temp_free_i32(tmp2);
2411 tcg_temp_free_i32(tmp);
2413 gen_op_iwmmxt_movq_wRn_M0(acc);
2417 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2418 /* Internal Accumulator Access Format */
2419 rdhi = (insn >> 16) & 0xf;
2420 rdlo = (insn >> 12) & 0xf;
2426 if (insn & ARM_CP_RW_BIT) { /* MRA */
2427 iwmmxt_load_reg(cpu_V0, acc);
2428 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2429 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2430 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2431 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
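/* The accumulator is only 40 bits wide, hence the high word is masked
   down to its low 8 bits here. */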
2433 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2434 iwmmxt_store_reg(cpu_V0, acc);
2442 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if
2443 the instruction is not defined. */
2444 static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2449 /* M-profile cores use memory-mapped registers instead of cp15. */
2450 if (arm_feature(env, ARM_FEATURE_M))
2453 if ((insn & (1 << 25)) == 0) {
2454 if (insn & (1 << 20)) {
2458 /* mcrr. Used for block cache operations, so implement as no-op. */
2461 if ((insn & (1 << 4)) == 0) {
2470 rd = (insn >> 12) & 0xf;
2472 tmp2 = tcg_const_i32(insn);
2473 if (insn & ARM_CP_RW_BIT) {
2474 tmp = tcg_temp_new_i32();
2475 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2476 /* If the destination register is r15 then the condition codes are set. */
2478 store_reg(s, rd, tmp);
2480 tcg_temp_free_i32(tmp);
2482 tmp = load_reg(s, rd);
2483 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2484 tcg_temp_free_i32(tmp);
2485 /* Normally we would always end the TB here, but Linux
2486 * arch/arm/mach-pxa/sleep.S expects two instructions following
2487 * an MMU enable to execute from cache. Imitate this behaviour. */
2488 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2489 (insn & 0x0fff0fff) != 0x0e010f10)
2492 tcg_temp_free_i32(tmp2);
2496 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2497 #define VFP_SREG(insn, bigbit, smallbit) \
2498 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2499 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2500 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2501 reg = (((insn) >> (bigbit)) & 0x0f) \
2502 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2504 if (insn & (1 << (smallbit))) \
2506 reg = ((insn) >> (bigbit)) & 0x0f; \
2509 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2510 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2511 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2512 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2513 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2514 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
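/* The VFP_SREG/VFP_DREG macros assemble the 5-bit VFP register number
 * from a 4-bit insn field plus one extra bit elsewhere in the insn:
 * for single-precision the extra bit is the LSB (S0-S31), while for
 * double-precision it becomes bit 4 (D16-D31), which is only legal
 * when VFP3 (32 double registers) is present.
 */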
2516 /* Move between integer and VFP cores. */
2517 static TCGv gen_vfp_mrs(void)
2519 TCGv tmp = tcg_temp_new_i32();
2520 tcg_gen_mov_i32(tmp, cpu_F0s);
2524 static void gen_vfp_msr(TCGv tmp)
2526 tcg_gen_mov_i32(cpu_F0s, tmp);
2527 tcg_temp_free_i32(tmp);
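/* Duplicate a narrow element across all lanes of a 32-bit value,
 * e.g. gen_neon_dup_u8(var, 0) turns 0x000000ab into 0xabababab.
 * Used by VDUP and by the load-and-replicate forms of VLDn.
 */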
2530 static void gen_neon_dup_u8(TCGv var, int shift)
2532 TCGv tmp = tcg_temp_new_i32();
2534 tcg_gen_shri_i32(var, var, shift);
2535 tcg_gen_ext8u_i32(var, var);
2536 tcg_gen_shli_i32(tmp, var, 8);
2537 tcg_gen_or_i32(var, var, tmp);
2538 tcg_gen_shli_i32(tmp, var, 16);
2539 tcg_gen_or_i32(var, var, tmp);
2540 tcg_temp_free_i32(tmp);
2543 static void gen_neon_dup_low16(TCGv var)
2545 TCGv tmp = tcg_temp_new_i32();
2546 tcg_gen_ext16u_i32(var, var);
2547 tcg_gen_shli_i32(tmp, var, 16);
2548 tcg_gen_or_i32(var, var, tmp);
2549 tcg_temp_free_i32(tmp);
2552 static void gen_neon_dup_high16(TCGv var)
2554 TCGv tmp = tcg_temp_new_i32();
2555 tcg_gen_andi_i32(var, var, 0xffff0000);
2556 tcg_gen_shri_i32(tmp, var, 16);
2557 tcg_gen_or_i32(var, var, tmp);
2558 tcg_temp_free_i32(tmp);
2561 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2563 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2567 tmp = gen_ld8u(addr, IS_USER(s));
2568 gen_neon_dup_u8(tmp, 0);
2571 tmp = gen_ld16u(addr, IS_USER(s));
2572 gen_neon_dup_low16(tmp);
2575 tmp = gen_ld32(addr, IS_USER(s));
2577 default: /* Avoid compiler warnings. */
2583 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2584 (i.e. an undefined instruction). */
2585 static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2587 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2593 if (!arm_feature(env, ARM_FEATURE_VFP))
2596 if (!s->vfp_enabled) {
2597 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2598 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2600 rn = (insn >> 16) & 0xf;
2601 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2602 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2605 dp = ((insn & 0xf00) == 0xb00);
2606 switch ((insn >> 24) & 0xf) {
2608 if (insn & (1 << 4)) {
2609 /* single register transfer */
2610 rd = (insn >> 12) & 0xf;
2615 VFP_DREG_N(rn, insn);
2618 if (insn & 0x00c00060
2619 && !arm_feature(env, ARM_FEATURE_NEON))
2622 pass = (insn >> 21) & 1;
2623 if (insn & (1 << 22)) {
2625 offset = ((insn >> 5) & 3) * 8;
2626 } else if (insn & (1 << 5)) {
2628 offset = (insn & (1 << 6)) ? 16 : 0;
2633 if (insn & ARM_CP_RW_BIT) {
2635 tmp = neon_load_reg(rn, pass);
2639 tcg_gen_shri_i32(tmp, tmp, offset);
2640 if (insn & (1 << 23))
2646 if (insn & (1 << 23)) {
2648 tcg_gen_shri_i32(tmp, tmp, 16);
2654 tcg_gen_sari_i32(tmp, tmp, 16);
2663 store_reg(s, rd, tmp);
2666 tmp = load_reg(s, rd);
2667 if (insn & (1 << 23)) {
2670 gen_neon_dup_u8(tmp, 0);
2671 } else if (size == 1) {
2672 gen_neon_dup_low16(tmp);
2674 for (n = 0; n <= pass * 2; n++) {
2675 tmp2 = tcg_temp_new_i32();
2676 tcg_gen_mov_i32(tmp2, tmp);
2677 neon_store_reg(rn, n, tmp2);
2679 neon_store_reg(rn, n, tmp);
2684 tmp2 = neon_load_reg(rn, pass);
2685 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2686 tcg_temp_free_i32(tmp2);
2689 tmp2 = neon_load_reg(rn, pass);
2690 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2691 tcg_temp_free_i32(tmp2);
2696 neon_store_reg(rn, pass, tmp);
2700 if ((insn & 0x6f) != 0x00)
2702 rn = VFP_SREG_N(insn);
2703 if (insn & ARM_CP_RW_BIT) {
2705 if (insn & (1 << 21)) {
2706 /* system register */
2711 /* VFP2 allows access to FSID from userspace.
2712 VFP3 restricts all id registers to privileged
2715 && arm_feature(env, ARM_FEATURE_VFP3))
2717 tmp = load_cpu_field(vfp.xregs[rn]);
2722 tmp = load_cpu_field(vfp.xregs[rn]);
2724 case ARM_VFP_FPINST:
2725 case ARM_VFP_FPINST2:
2726 /* Not present in VFP3. */
2728 || arm_feature(env, ARM_FEATURE_VFP3))
2730 tmp = load_cpu_field(vfp.xregs[rn]);
2734 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2735 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2737 tmp = tcg_temp_new_i32();
2738 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2744 || !arm_feature(env, ARM_FEATURE_MVFR))
2746 tmp = load_cpu_field(vfp.xregs[rn]);
2752 gen_mov_F0_vreg(0, rn);
2753 tmp = gen_vfp_mrs();
2756 /* Set the 4 flag bits in the CPSR. */
2758 tcg_temp_free_i32(tmp);
2760 store_reg(s, rd, tmp);
2764 tmp = load_reg(s, rd);
2765 if (insn & (1 << 21)) {
2767 /* system register */
2772 /* Writes are ignored. */
2775 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2776 tcg_temp_free_i32(tmp);
2782 /* TODO: VFP subarchitecture support.
2783 * For now, keep the EN bit only */
2784 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2785 store_cpu_field(tmp, vfp.xregs[rn]);
2788 case ARM_VFP_FPINST:
2789 case ARM_VFP_FPINST2:
2790 store_cpu_field(tmp, vfp.xregs[rn]);
2797 gen_mov_vreg_F0(0, rn);
2802 /* data processing */
2803 /* The opcode is in bits 23, 21, 20 and 6. */
2804 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2808 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2810 /* rn is register number */
2811 VFP_DREG_N(rn, insn);
2814 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2815 /* Integer or single precision destination. */
2816 rd = VFP_SREG_D(insn);
2818 VFP_DREG_D(rd, insn);
2821 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2822 /* VCVT from int is always from S reg regardless of dp bit.
2823 * VCVT with immediate frac_bits has same format as SREG_M
2825 rm = VFP_SREG_M(insn);
2827 VFP_DREG_M(rm, insn);
2830 rn = VFP_SREG_N(insn);
2831 if (op == 15 && rn == 15) {
2832 /* Double precision destination. */
2833 VFP_DREG_D(rd, insn);
2835 rd = VFP_SREG_D(insn);
2837 /* NB that we implicitly rely on the encoding for the frac_bits
2838 * in VCVT of fixed to float being the same as that of an SREG_M
2840 rm = VFP_SREG_M(insn);
2843 veclen = s->vec_len;
2844 if (op == 15 && rn > 3)
2847 /* Shut up compiler warnings. */
2858 /* Figure out what type of vector operation this is. */
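/* A destination in bank 0 behaves as a scalar; otherwise the VFP
 * short-vector rules apply and the operation is repeated veclen
 * times, stepping the register numbers by delta_d/delta_m within
 * their bank on each iteration.
 */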
2859 if ((rd & bank_mask) == 0) {
2864 delta_d = (s->vec_stride >> 1) + 1;
2866 delta_d = s->vec_stride + 1;
2868 if ((rm & bank_mask) == 0) {
2869 /* mixed scalar/vector */
2878 /* Load the initial operands. */
2883 /* Integer source */
2884 gen_mov_F0_vreg(0, rm);
2889 gen_mov_F0_vreg(dp, rd);
2890 gen_mov_F1_vreg(dp, rm);
2894 /* Compare with zero */
2895 gen_mov_F0_vreg(dp, rd);
2906 /* Source and destination the same. */
2907 gen_mov_F0_vreg(dp, rd);
2913 /* VCVTB, VCVTT: only present with the halfprec extension,
2914 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2916 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2919 /* Otherwise fall through */
2921 /* One source operand. */
2922 gen_mov_F0_vreg(dp, rm);
2926 /* Two source operands. */
2927 gen_mov_F0_vreg(dp, rn);
2928 gen_mov_F1_vreg(dp, rm);
2932 /* Perform the calculation. */
2934 case 0: /* VMLA: fd + (fn * fm) */
2935 /* Note that order of inputs to the add matters for NaNs */
2937 gen_mov_F0_vreg(dp, rd);
2940 case 1: /* VMLS: fd + -(fn * fm) */
2943 gen_mov_F0_vreg(dp, rd);
2946 case 2: /* VNMLS: -fd + (fn * fm) */
2947 /* Note that it isn't valid to replace (-A + B) with (B - A)
2948 * or similar plausible looking simplifications
2949 * because this will give wrong results for NaNs.
2952 gen_mov_F0_vreg(dp, rd);
2956 case 3: /* VNMLA: -fd + -(fn * fm) */
2959 gen_mov_F0_vreg(dp, rd);
2963 case 4: /* mul: fn * fm */
2966 case 5: /* nmul: -(fn * fm) */
2970 case 6: /* add: fn + fm */
2973 case 7: /* sub: fn - fm */
2976 case 8: /* div: fn / fm */
2979 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2980 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2981 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2982 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2983 /* These are fused multiply-add, and must be done as one
2984 * floating point operation with no rounding between the
2985 * multiplication and addition steps.
2986 * NB that doing the negations here as separate steps is
2987 * correct: an input NaN should come out with its sign bit
2988 * flipped if it is a negated input.
2990 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2998 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3000 frd = tcg_temp_new_i64();
3001 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3004 gen_helper_vfp_negd(frd, frd);
3006 fpst = get_fpstatus_ptr(0);
3007 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3008 cpu_F1d, frd, fpst);
3009 tcg_temp_free_ptr(fpst);
3010 tcg_temp_free_i64(frd);
3016 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3018 frd = tcg_temp_new_i32();
3019 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3021 gen_helper_vfp_negs(frd, frd);
3023 fpst = get_fpstatus_ptr(0);
3024 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3025 cpu_F1s, frd, fpst);
3026 tcg_temp_free_ptr(fpst);
3027 tcg_temp_free_i32(frd);
3030 case 14: /* fconst */
3031 if (!arm_feature(env, ARM_FEATURE_VFP3))
3034 n = (insn << 12) & 0x80000000;
3035 i = ((insn >> 12) & 0x70) | (insn & 0xf);
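/* n holds the sign bit and i the remaining 7 bits of the 8-bit
 * immediate, which is then expanded into a full single- or
 * double-precision constant (exponent bit replicated, fraction
 * padded with zeroes) as specified for VFP3 VMOV (immediate).
 */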
3042 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3049 tcg_gen_movi_i32(cpu_F0s, n);
3052 case 15: /* extension space */
3066 case 4: /* vcvtb.f32.f16 */
3067 tmp = gen_vfp_mrs();
3068 tcg_gen_ext16u_i32(tmp, tmp);
3069 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3070 tcg_temp_free_i32(tmp);
3072 case 5: /* vcvtt.f32.f16 */
3073 tmp = gen_vfp_mrs();
3074 tcg_gen_shri_i32(tmp, tmp, 16);
3075 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3076 tcg_temp_free_i32(tmp);
3078 case 6: /* vcvtb.f16.f32 */
3079 tmp = tcg_temp_new_i32();
3080 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3081 gen_mov_F0_vreg(0, rd);
3082 tmp2 = gen_vfp_mrs();
3083 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3084 tcg_gen_or_i32(tmp, tmp, tmp2);
3085 tcg_temp_free_i32(tmp2);
3088 case 7: /* vcvtt.f16.f32 */
3089 tmp = tcg_temp_new_i32();
3090 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3091 tcg_gen_shli_i32(tmp, tmp, 16);
3092 gen_mov_F0_vreg(0, rd);
3093 tmp2 = gen_vfp_mrs();
3094 tcg_gen_ext16u_i32(tmp2, tmp2);
3095 tcg_gen_or_i32(tmp, tmp, tmp2);
3096 tcg_temp_free_i32(tmp2);
3108 case 11: /* cmpez */
3112 case 15: /* single<->double conversion */
3114 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3116 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3118 case 16: /* fuito */
3119 gen_vfp_uito(dp, 0);
3121 case 17: /* fsito */
3122 gen_vfp_sito(dp, 0);
3124 case 20: /* fshto */
3125 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 gen_vfp_shto(dp, 16 - rm, 0);
3129 case 21: /* fslto */
3130 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 gen_vfp_slto(dp, 32 - rm, 0);
3134 case 22: /* fuhto */
3135 if (!arm_feature(env, ARM_FEATURE_VFP3))
3137 gen_vfp_uhto(dp, 16 - rm, 0);
3139 case 23: /* fulto */
3140 if (!arm_feature(env, ARM_FEATURE_VFP3))
3142 gen_vfp_ulto(dp, 32 - rm, 0);
3144 case 24: /* ftoui */
3145 gen_vfp_toui(dp, 0);
3147 case 25: /* ftouiz */
3148 gen_vfp_touiz(dp, 0);
3150 case 26: /* ftosi */
3151 gen_vfp_tosi(dp, 0);
3153 case 27: /* ftosiz */
3154 gen_vfp_tosiz(dp, 0);
3156 case 28: /* ftosh */
3157 if (!arm_feature(env, ARM_FEATURE_VFP3))
3159 gen_vfp_tosh(dp, 16 - rm, 0);
3161 case 29: /* ftosl */
3162 if (!arm_feature(env, ARM_FEATURE_VFP3))
3164 gen_vfp_tosl(dp, 32 - rm, 0);
3166 case 30: /* ftouh */
3167 if (!arm_feature(env, ARM_FEATURE_VFP3))
3169 gen_vfp_touh(dp, 16 - rm, 0);
3171 case 31: /* ftoul */
3172 if (!arm_feature(env, ARM_FEATURE_VFP3))
3174 gen_vfp_toul(dp, 32 - rm, 0);
3176 default: /* undefined */
3180 default: /* undefined */
3184 /* Write back the result. */
3185 if (op == 15 && (rn >= 8 && rn <= 11))
3186 ; /* Comparison, do nothing. */
3187 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3188 /* VCVT double to int: always integer result. */
3189 gen_mov_vreg_F0(0, rd);
3190 else if (op == 15 && rn == 15)
3192 gen_mov_vreg_F0(!dp, rd);
3194 gen_mov_vreg_F0(dp, rd);
3196 /* break out of the loop if we have finished */
3200 if (op == 15 && delta_m == 0) {
3201 /* single source one-many */
3203 rd = ((rd + delta_d) & (bank_mask - 1))
3205 gen_mov_vreg_F0(dp, rd);
3209 /* Set up the next operands. */
3211 rd = ((rd + delta_d) & (bank_mask - 1))
3215 /* One source operand. */
3216 rm = ((rm + delta_m) & (bank_mask - 1))
3218 gen_mov_F0_vreg(dp, rm);
3220 /* Two source operands. */
3221 rn = ((rn + delta_d) & (bank_mask - 1))
3223 gen_mov_F0_vreg(dp, rn);
3225 rm = ((rm + delta_m) & (bank_mask - 1))
3227 gen_mov_F1_vreg(dp, rm);
3235 if ((insn & 0x03e00000) == 0x00400000) {
3236 /* two-register transfer */
3237 rn = (insn >> 16) & 0xf;
3238 rd = (insn >> 12) & 0xf;
3240 VFP_DREG_M(rm, insn);
3242 rm = VFP_SREG_M(insn);
3245 if (insn & ARM_CP_RW_BIT) {
3248 gen_mov_F0_vreg(0, rm * 2);
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rd, tmp);
3251 gen_mov_F0_vreg(0, rm * 2 + 1);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
3255 gen_mov_F0_vreg(0, rm);
3256 tmp = gen_vfp_mrs();
3257 store_reg(s, rd, tmp);
3258 gen_mov_F0_vreg(0, rm + 1);
3259 tmp = gen_vfp_mrs();
3260 store_reg(s, rn, tmp);
3265 tmp = load_reg(s, rd);
3267 gen_mov_vreg_F0(0, rm * 2);
3268 tmp = load_reg(s, rn);
3270 gen_mov_vreg_F0(0, rm * 2 + 1);
3272 tmp = load_reg(s, rd);
3274 gen_mov_vreg_F0(0, rm);
3275 tmp = load_reg(s, rn);
3277 gen_mov_vreg_F0(0, rm + 1);
3282 rn = (insn >> 16) & 0xf;
3284 VFP_DREG_D(rd, insn);
3286 rd = VFP_SREG_D(insn);
3287 if ((insn & 0x01200000) == 0x01000000) {
3288 /* Single load/store */
3289 offset = (insn & 0xff) << 2;
3290 if ((insn & (1 << 23)) == 0)
3292 if (s->thumb && rn == 15) {
3293 /* This is actually UNPREDICTABLE */
3294 addr = tcg_temp_new_i32();
3295 tcg_gen_movi_i32(addr, s->pc & ~2);
3297 addr = load_reg(s, rn);
3299 tcg_gen_addi_i32(addr, addr, offset);
3300 if (insn & (1 << 20)) {
3301 gen_vfp_ld(s, dp, addr);
3302 gen_mov_vreg_F0(dp, rd);
3304 gen_mov_F0_vreg(dp, rd);
3305 gen_vfp_st(s, dp, addr);
3307 tcg_temp_free_i32(addr);
3309 /* load/store multiple */
3310 int w = insn & (1 << 21);
3312 n = (insn >> 1) & 0x7f;
3316 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3317 /* P == U, W == 1 => UNDEF */
3320 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3321 /* UNPREDICTABLE cases for bad immediates: we choose to
3322 * UNDEF to avoid generating huge numbers of TCG ops
3326 if (rn == 15 && w) {
3327 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3331 if (s->thumb && rn == 15) {
3332 /* This is actually UNPREDICTABLE */
3333 addr = tcg_temp_new_i32();
3334 tcg_gen_movi_i32(addr, s->pc & ~2);
3336 addr = load_reg(s, rn);
3338 if (insn & (1 << 24)) /* pre-decrement */
3339 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3345 for (i = 0; i < n; i++) {
3346 if (insn & ARM_CP_RW_BIT) {
3348 gen_vfp_ld(s, dp, addr);
3349 gen_mov_vreg_F0(dp, rd + i);
3352 gen_mov_F0_vreg(dp, rd + i);
3353 gen_vfp_st(s, dp, addr);
3355 tcg_gen_addi_i32(addr, addr, offset);
3359 if (insn & (1 << 24))
3360 offset = -offset * n;
3361 else if (dp && (insn & 1))
3367 tcg_gen_addi_i32(addr, addr, offset);
3368 store_reg(s, rn, addr);
3370 tcg_temp_free_i32(addr);
3376 /* Should never happen. */
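/* Emit code to jump to DEST. If the target lies within the same guest
 * page as this TB we can chain directly to the next TB (exit_tb with
 * the TB pointer plus the slot number); otherwise just update the PC
 * and return to the main loop.
 */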
3382 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3384 TranslationBlock *tb;
3387 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3389 gen_set_pc_im(dest);
3390 tcg_gen_exit_tb((tcg_target_long)tb + n);
3392 gen_set_pc_im(dest);
3397 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3399 if (unlikely(s->singlestep_enabled)) {
3400 /* An indirect jump so that we still trigger the debug exception. */
3405 gen_goto_tb(s, 0, dest);
3406 s->is_jmp = DISAS_TB_JUMP;
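/* 16x16->32 signed multiply step for the SMULxy/SMLAxy family:
 * x and y select the top or bottom halfword of each operand, which
 * is sign-extended before the multiply.
 */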
3410 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3413 tcg_gen_sari_i32(t0, t0, 16);
3417 tcg_gen_sari_i32(t1, t1, 16);
3420 tcg_gen_mul_i32(t0, t0, t1);
3423 /* Return the mask of PSR bits set by a MSR instruction. */
3424 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
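/* flags is the instruction's field mask: bit 0 = c (PSR[7:0]),
 * bit 1 = x (PSR[15:8]), bit 2 = s (PSR[23:16]), bit 3 = f (PSR[31:24]).
 */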
3428 if (flags & (1 << 0))
3430 if (flags & (1 << 1))
3432 if (flags & (1 << 2))
3434 if (flags & (1 << 3))
3437 /* Mask out undefined bits. */
3438 mask &= ~CPSR_RESERVED;
3439 if (!arm_feature(env, ARM_FEATURE_V4T))
3441 if (!arm_feature(env, ARM_FEATURE_V5))
3442 mask &= ~CPSR_Q; /* V5TE in reality */
3443 if (!arm_feature(env, ARM_FEATURE_V6))
3444 mask &= ~(CPSR_E | CPSR_GE);
3445 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3447 /* Mask out execution state bits. */
3450 /* Mask out privileged bits. */
3456 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3457 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3461 /* ??? This is also undefined in system mode. */
3465 tmp = load_cpu_field(spsr);
3466 tcg_gen_andi_i32(tmp, tmp, ~mask);
3467 tcg_gen_andi_i32(t0, t0, mask);
3468 tcg_gen_or_i32(tmp, tmp, t0);
3469 store_cpu_field(tmp, spsr);
3471 gen_set_cpsr(t0, mask);
3473 tcg_temp_free_i32(t0);
3478 /* Returns nonzero if access to the PSR is not permitted. */
3479 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3482 tmp = tcg_temp_new_i32();
3483 tcg_gen_movi_i32(tmp, val);
3484 return gen_set_psr(s, mask, spsr, tmp);
3487 /* Generate an old-style exception return. Marks pc as dead. */
3488 static void gen_exception_return(DisasContext *s, TCGv pc)
3491 store_reg(s, 15, pc);
3492 tmp = load_cpu_field(spsr);
3493 gen_set_cpsr(tmp, 0xffffffff);
3494 tcg_temp_free_i32(tmp);
3495 s->is_jmp = DISAS_UPDATE;
3498 /* Generate a v6 exception return. Marks both values as dead. */
3499 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3501 gen_set_cpsr(cpsr, 0xffffffff);
3502 tcg_temp_free_i32(cpsr);
3503 store_reg(s, 15, pc);
3504 s->is_jmp = DISAS_UPDATE;
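/* Write the current Thumb IT-block state (base condition plus the
 * remaining mask bits) back into env->condexec_bits, so that it is
 * correct if we subsequently take an exception.
 */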
3508 gen_set_condexec (DisasContext *s)
3510 if (s->condexec_mask) {
3511 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3512 TCGv tmp = tcg_temp_new_i32();
3513 tcg_gen_movi_i32(tmp, val);
3514 store_cpu_field(tmp, condexec_bits);
3518 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3520 gen_set_condexec(s);
3521 gen_set_pc_im(s->pc - offset);
3522 gen_exception(excp);
3523 s->is_jmp = DISAS_JUMP;
3526 static void gen_nop_hint(DisasContext *s, int val)
3530 gen_set_pc_im(s->pc);
3531 s->is_jmp = DISAS_WFI;
3535 /* TODO: Implement SEV and WFE. May help SMP performance. */
3541 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3543 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3546 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3547 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3548 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3553 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3556 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3557 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3558 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3563 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3564 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3565 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3566 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3567 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
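/* Dispatch an elementwise Neon integer helper based on (size, u):
 * size picks the 8/16/32-bit element helper and u the unsigned
 * variant. Unhandled combinations "return 1" so the caller treats
 * the insn as UNDEF. The _ENV form is for helpers that also take
 * cpu_env (e.g. the saturating ops).
 */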
3569 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3570 switch ((size << 1) | u) { \
3572 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3575 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3578 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3581 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3584 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3587 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3589 default: return 1; \
3592 #define GEN_NEON_INTEGER_OP(name) do { \
3593 switch ((size << 1) | u) { \
3595 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3598 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3601 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3604 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3607 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3610 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3612 default: return 1; \
3615 static TCGv neon_load_scratch(int scratch)
3617 TCGv tmp = tcg_temp_new_i32();
3618 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3622 static void neon_store_scratch(int scratch, TCGv var)
3624 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3625 tcg_temp_free_i32(var);
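/* Fetch the scalar operand for the "two registers and a scalar"
 * forms: for 16-bit scalars the index selects one halfword of a
 * D register and the value is duplicated into both halves of the
 * returned 32-bit temporary.
 */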
3628 static inline TCGv neon_get_scalar(int size, int reg)
3632 tmp = neon_load_reg(reg & 7, reg >> 4);
3634 gen_neon_dup_high16(tmp);
3636 gen_neon_dup_low16(tmp);
3639 tmp = neon_load_reg(reg & 15, reg >> 4);
3644 static int gen_neon_unzip(int rd, int rm, int size, int q)
3647 if (!q && size == 2) {
3650 tmp = tcg_const_i32(rd);
3651 tmp2 = tcg_const_i32(rm);
3655 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3658 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3661 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3669 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3672 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3678 tcg_temp_free_i32(tmp);
3679 tcg_temp_free_i32(tmp2);
3683 static int gen_neon_zip(int rd, int rm, int size, int q)
3686 if (!q && size == 2) {
3689 tmp = tcg_const_i32(rd);
3690 tmp2 = tcg_const_i32(rm);
3694 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3697 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3700 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3708 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3711 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3717 tcg_temp_free_i32(tmp);
3718 tcg_temp_free_i32(tmp2);
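/* Helpers for VTRN: transpose 2x2 groups of 8-bit or 16-bit elements
 * between the two 32-bit inputs, one chunk of the vector at a time.
 */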
3722 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3726 rd = tcg_temp_new_i32();
3727 tmp = tcg_temp_new_i32();
3729 tcg_gen_shli_i32(rd, t0, 8);
3730 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3731 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3732 tcg_gen_or_i32(rd, rd, tmp);
3734 tcg_gen_shri_i32(t1, t1, 8);
3735 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3736 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3737 tcg_gen_or_i32(t1, t1, tmp);
3738 tcg_gen_mov_i32(t0, rd);
3740 tcg_temp_free_i32(tmp);
3741 tcg_temp_free_i32(rd);
3744 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3748 rd = tcg_temp_new_i32();
3749 tmp = tcg_temp_new_i32();
3751 tcg_gen_shli_i32(rd, t0, 16);
3752 tcg_gen_andi_i32(tmp, t1, 0xffff);
3753 tcg_gen_or_i32(rd, rd, tmp);
3754 tcg_gen_shri_i32(t1, t1, 16);
3755 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3756 tcg_gen_or_i32(t1, t1, tmp);
3757 tcg_gen_mov_i32(t0, rd);
3759 tcg_temp_free_i32(tmp);
3760 tcg_temp_free_i32(rd);
3768 } neon_ls_element_type[11] = {
3782 /* Translate a NEON load/store element instruction. Return nonzero if the
3783 instruction is invalid. */
3784 static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
3803 if (!s->vfp_enabled)
3805 VFP_DREG_D(rd, insn);
3806 rn = (insn >> 16) & 0xf;
3808 load = (insn & (1 << 21)) != 0;
3809 if ((insn & (1 << 23)) == 0) {
3810 /* Load store all elements. */
3811 op = (insn >> 8) & 0xf;
3812 size = (insn >> 6) & 3;
3815 /* Catch UNDEF cases for bad values of align field */
3818 if (((insn >> 5) & 1) == 1) {
3823 if (((insn >> 4) & 3) == 3) {
3830 nregs = neon_ls_element_type[op].nregs;
3831 interleave = neon_ls_element_type[op].interleave;
3832 spacing = neon_ls_element_type[op].spacing;
3833 if (size == 3 && (interleave | spacing) != 1)
3835 addr = tcg_temp_new_i32();
3836 load_reg_var(s, addr, rn);
3837 stride = (1 << size) * interleave;
3838 for (reg = 0; reg < nregs; reg++) {
3839 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3840 load_reg_var(s, addr, rn);
3841 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3842 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3843 load_reg_var(s, addr, rn);
3844 tcg_gen_addi_i32(addr, addr, 1 << size);
3848 tmp64 = gen_ld64(addr, IS_USER(s));
3849 neon_store_reg64(tmp64, rd);
3850 tcg_temp_free_i64(tmp64);
3852 tmp64 = tcg_temp_new_i64();
3853 neon_load_reg64(tmp64, rd);
3854 gen_st64(tmp64, addr, IS_USER(s));
3856 tcg_gen_addi_i32(addr, addr, stride);
3858 for (pass = 0; pass < 2; pass++) {
3861 tmp = gen_ld32(addr, IS_USER(s));
3862 neon_store_reg(rd, pass, tmp);
3864 tmp = neon_load_reg(rd, pass);
3865 gen_st32(tmp, addr, IS_USER(s));
3867 tcg_gen_addi_i32(addr, addr, stride);
3868 } else if (size == 1) {
3870 tmp = gen_ld16u(addr, IS_USER(s));
3871 tcg_gen_addi_i32(addr, addr, stride);
3872 tmp2 = gen_ld16u(addr, IS_USER(s));
3873 tcg_gen_addi_i32(addr, addr, stride);
3874 tcg_gen_shli_i32(tmp2, tmp2, 16);
3875 tcg_gen_or_i32(tmp, tmp, tmp2);
3876 tcg_temp_free_i32(tmp2);
3877 neon_store_reg(rd, pass, tmp);
3879 tmp = neon_load_reg(rd, pass);
3880 tmp2 = tcg_temp_new_i32();
3881 tcg_gen_shri_i32(tmp2, tmp, 16);
3882 gen_st16(tmp, addr, IS_USER(s));
3883 tcg_gen_addi_i32(addr, addr, stride);
3884 gen_st16(tmp2, addr, IS_USER(s));
3885 tcg_gen_addi_i32(addr, addr, stride);
3887 } else /* size == 0 */ {
3890 for (n = 0; n < 4; n++) {
3891 tmp = gen_ld8u(addr, IS_USER(s));
3892 tcg_gen_addi_i32(addr, addr, stride);
3896 tcg_gen_shli_i32(tmp, tmp, n * 8);
3897 tcg_gen_or_i32(tmp2, tmp2, tmp);
3898 tcg_temp_free_i32(tmp);
3901 neon_store_reg(rd, pass, tmp2);
3903 tmp2 = neon_load_reg(rd, pass);
3904 for (n = 0; n < 4; n++) {
3905 tmp = tcg_temp_new_i32();
3907 tcg_gen_mov_i32(tmp, tmp2);
3909 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3911 gen_st8(tmp, addr, IS_USER(s));
3912 tcg_gen_addi_i32(addr, addr, stride);
3914 tcg_temp_free_i32(tmp2);
3921 tcg_temp_free_i32(addr);
3924 size = (insn >> 10) & 3;
3926 /* Load single element to all lanes. */
3927 int a = (insn >> 4) & 1;
3931 size = (insn >> 6) & 3;
3932 nregs = ((insn >> 8) & 3) + 1;
3935 if (nregs != 4 || a == 0) {
3938 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3941 if (nregs == 1 && a == 1 && size == 0) {
3944 if (nregs == 3 && a == 1) {
3947 addr = tcg_temp_new_i32();
3948 load_reg_var(s, addr, rn);
3950 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3951 tmp = gen_load_and_replicate(s, addr, size);
3952 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3953 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3954 if (insn & (1 << 5)) {
3955 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3956 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3958 tcg_temp_free_i32(tmp);
3960 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3961 stride = (insn & (1 << 5)) ? 2 : 1;
3962 for (reg = 0; reg < nregs; reg++) {
3963 tmp = gen_load_and_replicate(s, addr, size);
3964 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3965 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3966 tcg_temp_free_i32(tmp);
3967 tcg_gen_addi_i32(addr, addr, 1 << size);
3971 tcg_temp_free_i32(addr);
3972 stride = (1 << size) * nregs;
3974 /* Single element. */
3975 int idx = (insn >> 4) & 0xf;
3976 pass = (insn >> 7) & 1;
3979 shift = ((insn >> 5) & 3) * 8;
3983 shift = ((insn >> 6) & 1) * 16;
3984 stride = (insn & (1 << 5)) ? 2 : 1;
3988 stride = (insn & (1 << 6)) ? 2 : 1;
3993 nregs = ((insn >> 8) & 3) + 1;
3994 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3997 if (((idx & (1 << size)) != 0) ||
3998 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4003 if ((idx & 1) != 0) {
4008 if (size == 2 && (idx & 2) != 0) {
4013 if ((size == 2) && ((idx & 3) == 3)) {
4020 if ((rd + stride * (nregs - 1)) > 31) {
4021 /* Attempts to write off the end of the register file
4022 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4023 * the neon_load_reg() would write off the end of the array.
4027 addr = tcg_temp_new_i32();
4028 load_reg_var(s, addr, rn);
4029 for (reg = 0; reg < nregs; reg++) {
4033 tmp = gen_ld8u(addr, IS_USER(s));
4036 tmp = gen_ld16u(addr, IS_USER(s));
4039 tmp = gen_ld32(addr, IS_USER(s));
4041 default: /* Avoid compiler warnings. */
4045 tmp2 = neon_load_reg(rd, pass);
4046 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4047 tcg_temp_free_i32(tmp2);
4049 neon_store_reg(rd, pass, tmp);
4050 } else { /* Store */
4051 tmp = neon_load_reg(rd, pass);
4053 tcg_gen_shri_i32(tmp, tmp, shift);
4056 gen_st8(tmp, addr, IS_USER(s));
4059 gen_st16(tmp, addr, IS_USER(s));
4062 gen_st32(tmp, addr, IS_USER(s));
4067 tcg_gen_addi_i32(addr, addr, 1 << size);
4069 tcg_temp_free_i32(addr);
4070 stride = nregs * (1 << size);
4076 base = load_reg(s, rn);
4078 tcg_gen_addi_i32(base, base, stride);
4081 index = load_reg(s, rm);
4082 tcg_gen_add_i32(base, base, index);
4083 tcg_temp_free_i32(index);
4085 store_reg(s, rn, base);
4090 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4091 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4093 tcg_gen_and_i32(t, t, c);
4094 tcg_gen_andc_i32(f, f, c);
4095 tcg_gen_or_i32(dest, t, f);
4098 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4101 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4102 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4103 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4108 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4111 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4112 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4113 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4118 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4121 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4122 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4123 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4128 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4131 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4132 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4133 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4138 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4144 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4145 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4150 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4151 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4158 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4159 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4164 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4165 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4172 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4176 case 0: gen_helper_neon_widen_u8(dest, src); break;
4177 case 1: gen_helper_neon_widen_u16(dest, src); break;
4178 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4183 case 0: gen_helper_neon_widen_s8(dest, src); break;
4184 case 1: gen_helper_neon_widen_s16(dest, src); break;
4185 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4189 tcg_temp_free_i32(src);
4192 static inline void gen_neon_addl(int size)
4195 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4196 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4197 case 2: tcg_gen_add_i64(CPU_V001); break;
4202 static inline void gen_neon_subl(int size)
4205 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4206 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4207 case 2: tcg_gen_sub_i64(CPU_V001); break;
4212 static inline void gen_neon_negl(TCGv_i64 var, int size)
4215 case 0: gen_helper_neon_negl_u16(var, var); break;
4216 case 1: gen_helper_neon_negl_u32(var, var); break;
4217 case 2: gen_helper_neon_negl_u64(var, var); break;
4222 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4225 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4226 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4231 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4235 switch ((size << 1) | u) {
4236 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4237 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4238 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4239 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4241 tmp = gen_muls_i64_i32(a, b);
4242 tcg_gen_mov_i64(dest, tmp);
4243 tcg_temp_free_i64(tmp);
4246 tmp = gen_mulu_i64_i32(a, b);
4247 tcg_gen_mov_i64(dest, tmp);
4248 tcg_temp_free_i64(tmp);
4253 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4254 Don't forget to clean them now. */
4256 tcg_temp_free_i32(a);
4257 tcg_temp_free_i32(b);
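/* Narrow a 64-bit vector of elements back to 32 bits, selecting
 * between plain truncation and the signed/unsigned saturating forms
 * (as used by VMOVN, VQMOVN and VQMOVUN).
 */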
4261 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4265 gen_neon_unarrow_sats(size, dest, src);
4267 gen_neon_narrow(size, dest, src);
4271 gen_neon_narrow_satu(size, dest, src);
4273 gen_neon_narrow_sats(size, dest, src);
4278 /* Symbolic constants for op fields for Neon 3-register same-length.
4279 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4282 #define NEON_3R_VHADD 0
4283 #define NEON_3R_VQADD 1
4284 #define NEON_3R_VRHADD 2
4285 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4286 #define NEON_3R_VHSUB 4
4287 #define NEON_3R_VQSUB 5
4288 #define NEON_3R_VCGT 6
4289 #define NEON_3R_VCGE 7
4290 #define NEON_3R_VSHL 8
4291 #define NEON_3R_VQSHL 9
4292 #define NEON_3R_VRSHL 10
4293 #define NEON_3R_VQRSHL 11
4294 #define NEON_3R_VMAX 12
4295 #define NEON_3R_VMIN 13
4296 #define NEON_3R_VABD 14
4297 #define NEON_3R_VABA 15
4298 #define NEON_3R_VADD_VSUB 16
4299 #define NEON_3R_VTST_VCEQ 17
4300 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4301 #define NEON_3R_VMUL 19
4302 #define NEON_3R_VPMAX 20
4303 #define NEON_3R_VPMIN 21
4304 #define NEON_3R_VQDMULH_VQRDMULH 22
4305 #define NEON_3R_VPADD 23
4306 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4307 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4308 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4309 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4310 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4311 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4312 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4314 static const uint8_t neon_3r_sizes[] = {
4315 [NEON_3R_VHADD] = 0x7,
4316 [NEON_3R_VQADD] = 0xf,
4317 [NEON_3R_VRHADD] = 0x7,
4318 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4319 [NEON_3R_VHSUB] = 0x7,
4320 [NEON_3R_VQSUB] = 0xf,
4321 [NEON_3R_VCGT] = 0x7,
4322 [NEON_3R_VCGE] = 0x7,
4323 [NEON_3R_VSHL] = 0xf,
4324 [NEON_3R_VQSHL] = 0xf,
4325 [NEON_3R_VRSHL] = 0xf,
4326 [NEON_3R_VQRSHL] = 0xf,
4327 [NEON_3R_VMAX] = 0x7,
4328 [NEON_3R_VMIN] = 0x7,
4329 [NEON_3R_VABD] = 0x7,
4330 [NEON_3R_VABA] = 0x7,
4331 [NEON_3R_VADD_VSUB] = 0xf,
4332 [NEON_3R_VTST_VCEQ] = 0x7,
4333 [NEON_3R_VML] = 0x7,
4334 [NEON_3R_VMUL] = 0x7,
4335 [NEON_3R_VPMAX] = 0x7,
4336 [NEON_3R_VPMIN] = 0x7,
4337 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4338 [NEON_3R_VPADD] = 0x7,
4339 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4340 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4341 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4342 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4343 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4344 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4345 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
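/* Each entry above has bit n set if the op accepts size value n
 * (see the check against (1 << size) below); 0xf entries additionally
 * allow the 64-bit (size == 3) element forms.
 */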
4348 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4349 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4352 #define NEON_2RM_VREV64 0
4353 #define NEON_2RM_VREV32 1
4354 #define NEON_2RM_VREV16 2
4355 #define NEON_2RM_VPADDL 4
4356 #define NEON_2RM_VPADDL_U 5
4357 #define NEON_2RM_VCLS 8
4358 #define NEON_2RM_VCLZ 9
4359 #define NEON_2RM_VCNT 10
4360 #define NEON_2RM_VMVN 11
4361 #define NEON_2RM_VPADAL 12
4362 #define NEON_2RM_VPADAL_U 13
4363 #define NEON_2RM_VQABS 14
4364 #define NEON_2RM_VQNEG 15
4365 #define NEON_2RM_VCGT0 16
4366 #define NEON_2RM_VCGE0 17
4367 #define NEON_2RM_VCEQ0 18
4368 #define NEON_2RM_VCLE0 19
4369 #define NEON_2RM_VCLT0 20
4370 #define NEON_2RM_VABS 22
4371 #define NEON_2RM_VNEG 23
4372 #define NEON_2RM_VCGT0_F 24
4373 #define NEON_2RM_VCGE0_F 25
4374 #define NEON_2RM_VCEQ0_F 26
4375 #define NEON_2RM_VCLE0_F 27
4376 #define NEON_2RM_VCLT0_F 28
4377 #define NEON_2RM_VABS_F 30
4378 #define NEON_2RM_VNEG_F 31
4379 #define NEON_2RM_VSWP 32
4380 #define NEON_2RM_VTRN 33
4381 #define NEON_2RM_VUZP 34
4382 #define NEON_2RM_VZIP 35
4383 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4384 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4385 #define NEON_2RM_VSHLL 38
4386 #define NEON_2RM_VCVT_F16_F32 44
4387 #define NEON_2RM_VCVT_F32_F16 46
4388 #define NEON_2RM_VRECPE 56
4389 #define NEON_2RM_VRSQRTE 57
4390 #define NEON_2RM_VRECPE_F 58
4391 #define NEON_2RM_VRSQRTE_F 59
4392 #define NEON_2RM_VCVT_FS 60
4393 #define NEON_2RM_VCVT_FU 61
4394 #define NEON_2RM_VCVT_SF 62
4395 #define NEON_2RM_VCVT_UF 63
4397 static int neon_2rm_is_float_op(int op)
4399 /* Return true if this neon 2reg-misc op is float-to-float */
4400 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4401 op >= NEON_2RM_VRECPE_F);
4404 /* Each entry in this array has bit n set if the insn allows
4405 * size value n (otherwise it will UNDEF). Since unallocated
4406 * op values will have no bits set they always UNDEF.
4408 static const uint8_t neon_2rm_sizes[] = {
4409 [NEON_2RM_VREV64] = 0x7,
4410 [NEON_2RM_VREV32] = 0x3,
4411 [NEON_2RM_VREV16] = 0x1,
4412 [NEON_2RM_VPADDL] = 0x7,
4413 [NEON_2RM_VPADDL_U] = 0x7,
4414 [NEON_2RM_VCLS] = 0x7,
4415 [NEON_2RM_VCLZ] = 0x7,
4416 [NEON_2RM_VCNT] = 0x1,
4417 [NEON_2RM_VMVN] = 0x1,
4418 [NEON_2RM_VPADAL] = 0x7,
4419 [NEON_2RM_VPADAL_U] = 0x7,
4420 [NEON_2RM_VQABS] = 0x7,
4421 [NEON_2RM_VQNEG] = 0x7,
4422 [NEON_2RM_VCGT0] = 0x7,
4423 [NEON_2RM_VCGE0] = 0x7,
4424 [NEON_2RM_VCEQ0] = 0x7,
4425 [NEON_2RM_VCLE0] = 0x7,
4426 [NEON_2RM_VCLT0] = 0x7,
4427 [NEON_2RM_VABS] = 0x7,
4428 [NEON_2RM_VNEG] = 0x7,
4429 [NEON_2RM_VCGT0_F] = 0x4,
4430 [NEON_2RM_VCGE0_F] = 0x4,
4431 [NEON_2RM_VCEQ0_F] = 0x4,
4432 [NEON_2RM_VCLE0_F] = 0x4,
4433 [NEON_2RM_VCLT0_F] = 0x4,
4434 [NEON_2RM_VABS_F] = 0x4,
4435 [NEON_2RM_VNEG_F] = 0x4,
4436 [NEON_2RM_VSWP] = 0x1,
4437 [NEON_2RM_VTRN] = 0x7,
4438 [NEON_2RM_VUZP] = 0x7,
4439 [NEON_2RM_VZIP] = 0x7,
4440 [NEON_2RM_VMOVN] = 0x7,
4441 [NEON_2RM_VQMOVN] = 0x7,
4442 [NEON_2RM_VSHLL] = 0x7,
4443 [NEON_2RM_VCVT_F16_F32] = 0x2,
4444 [NEON_2RM_VCVT_F32_F16] = 0x2,
4445 [NEON_2RM_VRECPE] = 0x4,
4446 [NEON_2RM_VRSQRTE] = 0x4,
4447 [NEON_2RM_VRECPE_F] = 0x4,
4448 [NEON_2RM_VRSQRTE_F] = 0x4,
4449 [NEON_2RM_VCVT_FS] = 0x4,
4450 [NEON_2RM_VCVT_FU] = 0x4,
4451 [NEON_2RM_VCVT_SF] = 0x4,
4452 [NEON_2RM_VCVT_UF] = 0x4,
4455 /* Translate a NEON data processing instruction. Return nonzero if the
4456 instruction is invalid.
4457 We process data in a mixture of 32-bit and 64-bit chunks.
4458 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4460 static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
4472 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4475 if (!s->vfp_enabled)
4477 q = (insn & (1 << 6)) != 0;
4478 u = (insn >> 24) & 1;
4479 VFP_DREG_D(rd, insn);
4480 VFP_DREG_N(rn, insn);
4481 VFP_DREG_M(rm, insn);
4482 size = (insn >> 20) & 3;
4483 if ((insn & (1 << 23)) == 0) {
4484 /* Three register same length. */
4485 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4486 /* Catch invalid op and bad size combinations: UNDEF */
4487 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4490 /* All insns of this form UNDEF for either this condition or the
4491 * superset of cases "Q==1"; we catch the latter later.
4493 if (q && ((rd | rn | rm) & 1)) {
4496 if (size == 3 && op != NEON_3R_LOGIC) {
4497 /* 64-bit element instructions. */
4498 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4499 neon_load_reg64(cpu_V0, rn + pass);
4500 neon_load_reg64(cpu_V1, rm + pass);
4504 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4507 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4513 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4516 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4522 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4524 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4529 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4532 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4538 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4540 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4543 case NEON_3R_VQRSHL:
4545 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4548 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4552 case NEON_3R_VADD_VSUB:
4554 tcg_gen_sub_i64(CPU_V001);
4556 tcg_gen_add_i64(CPU_V001);
4562 neon_store_reg64(cpu_V0, rd + pass);
4571 case NEON_3R_VQRSHL:
4574 /* Shift instruction operands are reversed. */
4589 case NEON_3R_FLOAT_ARITH:
4590 pairwise = (u && size < 2); /* if VPADD (float) */
4592 case NEON_3R_FLOAT_MINMAX:
4593 pairwise = u; /* if VPMIN/VPMAX (float) */
4595 case NEON_3R_FLOAT_CMP:
4597 /* no encoding for U=0 C=1x */
4601 case NEON_3R_FLOAT_ACMP:
4606 case NEON_3R_VRECPS_VRSQRTS:
4612 if (u && (size != 0)) {
4613 /* UNDEF on invalid size for polynomial subcase */
4618 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4626 if (pairwise && q) {
4627 /* All the pairwise insns UNDEF if Q is set */
4631 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4636 tmp = neon_load_reg(rn, 0);
4637 tmp2 = neon_load_reg(rn, 1);
4639 tmp = neon_load_reg(rm, 0);
4640 tmp2 = neon_load_reg(rm, 1);
4644 tmp = neon_load_reg(rn, pass);
4645 tmp2 = neon_load_reg(rm, pass);
4649 GEN_NEON_INTEGER_OP(hadd);
4652 GEN_NEON_INTEGER_OP_ENV(qadd);
4654 case NEON_3R_VRHADD:
4655 GEN_NEON_INTEGER_OP(rhadd);
4657 case NEON_3R_LOGIC: /* Logic ops. */
4658 switch ((u << 2) | size) {
4660 tcg_gen_and_i32(tmp, tmp, tmp2);
4663 tcg_gen_andc_i32(tmp, tmp, tmp2);
4666 tcg_gen_or_i32(tmp, tmp, tmp2);
4669 tcg_gen_orc_i32(tmp, tmp, tmp2);
4672 tcg_gen_xor_i32(tmp, tmp, tmp2);
4675 tmp3 = neon_load_reg(rd, pass);
4676 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4677 tcg_temp_free_i32(tmp3);
4680 tmp3 = neon_load_reg(rd, pass);
4681 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4682 tcg_temp_free_i32(tmp3);
4685 tmp3 = neon_load_reg(rd, pass);
4686 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4687 tcg_temp_free_i32(tmp3);
4692 GEN_NEON_INTEGER_OP(hsub);
4695 GEN_NEON_INTEGER_OP_ENV(qsub);
4698 GEN_NEON_INTEGER_OP(cgt);
4701 GEN_NEON_INTEGER_OP(cge);
4704 GEN_NEON_INTEGER_OP(shl);
4707 GEN_NEON_INTEGER_OP_ENV(qshl);
4710 GEN_NEON_INTEGER_OP(rshl);
4712 case NEON_3R_VQRSHL:
4713 GEN_NEON_INTEGER_OP_ENV(qrshl);
4716 GEN_NEON_INTEGER_OP(max);
4719 GEN_NEON_INTEGER_OP(min);
4722 GEN_NEON_INTEGER_OP(abd);
4725 GEN_NEON_INTEGER_OP(abd);
4726 tcg_temp_free_i32(tmp2);
4727 tmp2 = neon_load_reg(rd, pass);
4728 gen_neon_add(size, tmp, tmp2);
4730 case NEON_3R_VADD_VSUB:
4731 if (!u) { /* VADD */
4732 gen_neon_add(size, tmp, tmp2);
4735 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4736 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4737 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4742 case NEON_3R_VTST_VCEQ:
4743 if (!u) { /* VTST */
4745 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4746 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4747 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4752 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4753 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4754 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4759 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
4761 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4762 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4763 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4766 tcg_temp_free_i32(tmp2);
4767 tmp2 = neon_load_reg(rd, pass);
4769 gen_neon_rsb(size, tmp, tmp2);
4771 gen_neon_add(size, tmp, tmp2);
4775 if (u) { /* polynomial */
4776 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4777 } else { /* Integer */
4779 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4780 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4781 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4787 GEN_NEON_INTEGER_OP(pmax);
4790 GEN_NEON_INTEGER_OP(pmin);
4792 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4793 if (!u) { /* VQDMULH */
4796 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4799 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4803 } else { /* VQRDMULH */
4806 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4809 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4817 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4818 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4819 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4823 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4825 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4826 switch ((u << 2) | size) {
4829 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4832 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4835 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4840 tcg_temp_free_ptr(fpstatus);
4843 case NEON_3R_FLOAT_MULTIPLY:
4845 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4846 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4848 tcg_temp_free_i32(tmp2);
4849 tmp2 = neon_load_reg(rd, pass);
4851 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4853 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4856 tcg_temp_free_ptr(fpstatus);
4859 case NEON_3R_FLOAT_CMP:
4861 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4863 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4866 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4868 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4871 tcg_temp_free_ptr(fpstatus);
4874 case NEON_3R_FLOAT_ACMP:
4876 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4878 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4880 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4882 tcg_temp_free_ptr(fpstatus);
4885 case NEON_3R_FLOAT_MINMAX:
4887 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4889 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4891 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4893 tcg_temp_free_ptr(fpstatus);
4896 case NEON_3R_VRECPS_VRSQRTS:
4898 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4900 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4904 /* VFMA, VFMS: fused multiply-add */
4905 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4906 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4909 gen_helper_vfp_negs(tmp, tmp);
4911 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4912 tcg_temp_free_i32(tmp3);
4913 tcg_temp_free_ptr(fpstatus);
4919 tcg_temp_free_i32(tmp2);
4921 /* Save the result. For elementwise operations we can put it
4922 straight into the destination register. For pairwise operations
4923 we have to be careful to avoid clobbering the source operands. */
4924 if (pairwise && rd == rm) {
4925 neon_store_scratch(pass, tmp);
4927 neon_store_reg(rd, pass, tmp);
4931 if (pairwise && rd == rm) {
4932 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4933 tmp = neon_load_scratch(pass);
4934 neon_store_reg(rd, pass, tmp);
4937 /* End of 3 register same size operations. */
4938 } else if (insn & (1 << 4)) {
4939 if ((insn & 0x00380080) != 0) {
4940 /* Two registers and shift. */
4941 op = (insn >> 8) & 0xf;
4942 if (insn & (1 << 7)) {
4950 while ((insn & (1 << (size + 19))) == 0)
4953 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4954 /* To avoid excessive duplication of ops we implement shift
4955 by immediate using the variable shift operations. */
4957 /* Shift by immediate:
4958 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4959 if (q && ((rd | rm) & 1)) {
4962 if (!u && (op == 4 || op == 6)) {
4965 /* Right shifts are encoded as N - shift, where N is the
4966 element size in bits. */
4968 shift = shift - (1 << (size + 3));
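/* e.g. with 8-bit elements an encoded shift of 5 becomes -3 here:
 * the variable-shift helpers treat a negative count as a right shift,
 * which is what lets us reuse them for shift-by-immediate.
 */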
4976 imm = (uint8_t) shift;
4981 imm = (uint16_t) shift;
4992 for (pass = 0; pass < count; pass++) {
4994 neon_load_reg64(cpu_V0, rm + pass);
4995 tcg_gen_movi_i64(cpu_V1, imm);
5000 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5002 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5007 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5009 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5012 case 5: /* VSHL, VSLI */
5013 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5015 case 6: /* VQSHLU */
5016 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5021 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5024 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5029 if (op == 1 || op == 3) {
5031 neon_load_reg64(cpu_V1, rd + pass);
5032 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5033 } else if (op == 4 || (op == 5 && u)) {
5035 neon_load_reg64(cpu_V1, rd + pass);
5037 if (shift < -63 || shift > 63) {
5041 mask = 0xffffffffffffffffull >> -shift;
5043 mask = 0xffffffffffffffffull << shift;
5046 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5047 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5049 neon_store_reg64(cpu_V0, rd + pass);
5050 } else { /* size < 3 */
5051 /* Operands in T0 and T1. */
5052 tmp = neon_load_reg(rm, pass);
5053 tmp2 = tcg_temp_new_i32();
5054 tcg_gen_movi_i32(tmp2, imm);
5058 GEN_NEON_INTEGER_OP(shl);
5062 GEN_NEON_INTEGER_OP(rshl);
5065 case 5: /* VSHL, VSLI */
5067 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5068 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5069 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5073 case 6: /* VQSHLU */
5076 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5080 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5084 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5092 GEN_NEON_INTEGER_OP_ENV(qshl);
5095 tcg_temp_free_i32(tmp2);
5097 if (op == 1 || op == 3) {
5099 tmp2 = neon_load_reg(rd, pass);
5100 gen_neon_add(size, tmp, tmp2);
5101 tcg_temp_free_i32(tmp2);
5102 } else if (op == 4 || (op == 5 && u)) {
5107 mask = 0xff >> -shift;
5109 mask = (uint8_t)(0xff << shift);
5115 mask = 0xffff >> -shift;
5117 mask = (uint16_t)(0xffff << shift);
5121 if (shift < -31 || shift > 31) {
5125 mask = 0xffffffffu >> -shift;
5127 mask = 0xffffffffu << shift;
5133 tmp2 = neon_load_reg(rd, pass);
5134 tcg_gen_andi_i32(tmp, tmp, mask);
5135 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5136 tcg_gen_or_i32(tmp, tmp, tmp2);
5137 tcg_temp_free_i32(tmp2);
5139 neon_store_reg(rd, pass, tmp);
5142 } else if (op < 10) {
5143 /* Shift by immediate and narrow:
5144 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5145 int input_unsigned = (op == 8) ? !u : u;
5149 shift = shift - (1 << (size + 3));
5152 tmp64 = tcg_const_i64(shift);
5153 neon_load_reg64(cpu_V0, rm);
5154 neon_load_reg64(cpu_V1, rm + 1);
5155 for (pass = 0; pass < 2; pass++) {
5163 if (input_unsigned) {
5164 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5166 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5169 if (input_unsigned) {
5170 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5172 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5175 tmp = tcg_temp_new_i32();
5176 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5177 neon_store_reg(rd, pass, tmp);
5179 tcg_temp_free_i64(tmp64);
5182 imm = (uint16_t)shift;
5186 imm = (uint32_t)shift;
5188 tmp2 = tcg_const_i32(imm);
5189 tmp4 = neon_load_reg(rm + 1, 0);
5190 tmp5 = neon_load_reg(rm + 1, 1);
5191 for (pass = 0; pass < 2; pass++) {
5193 tmp = neon_load_reg(rm, 0);
5197 gen_neon_shift_narrow(size, tmp, tmp2, q,
5200 tmp3 = neon_load_reg(rm, 1);
5204 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5206 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5207 tcg_temp_free_i32(tmp);
5208 tcg_temp_free_i32(tmp3);
5209 tmp = tcg_temp_new_i32();
5210 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5211 neon_store_reg(rd, pass, tmp);
5213 tcg_temp_free_i32(tmp2);
5215 } else if (op == 10) {
5217 if (q || (rd & 1)) {
5220 tmp = neon_load_reg(rm, 0);
5221 tmp2 = neon_load_reg(rm, 1);
5222 for (pass = 0; pass < 2; pass++) {
5226 gen_neon_widen(cpu_V0, tmp, size, u);
5229 /* The shift is less than the width of the source
5230 type, so we can just shift the whole register. */
5231 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5232 /* Widen the result of the shift: we need to clear
5233 * the potential overflow bits resulting from
5234 * left bits of the narrow input appearing as
5235 * right bits of the left neighbouring narrow
5237 if (size < 2 || !u) {
5240 imm = (0xffu >> (8 - shift));
5242 } else if (size == 1) {
5243 imm = 0xffff >> (16 - shift);
5246 imm = 0xffffffff >> (32 - shift);
5249 imm64 = imm | (((uint64_t)imm) << 32);
5253 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5256 neon_store_reg64(cpu_V0, rd + pass);
5258 } else if (op >= 14) {
5259 /* VCVT fixed-point. */
5260 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5263 /* We have already masked out the must-be-1 top bit of imm6,
5264 * hence this 32-shift where the ARM ARM has 64-imm6.
5267 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5268 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5271 gen_vfp_ulto(0, shift, 1);
5273 gen_vfp_slto(0, shift, 1);
5276 gen_vfp_toul(0, shift, 1);
5278 gen_vfp_tosl(0, shift, 1);
5280 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5285 } else { /* (insn & 0x00380080) == 0 */
5287 if (q && (rd & 1)) {
5291 op = (insn >> 8) & 0xf;
5292 /* One register and immediate. */
5293 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5294 invert = (insn & (1 << 5)) != 0;
5295 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5296 * We choose to not special-case this and will behave as if a
5297 * valid constant encoding of 0 had been given.
5316 imm = (imm << 8) | (imm << 24);
5319 imm = (imm << 8) | 0xff;
5322 imm = (imm << 16) | 0xffff;
5325 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5333 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5334 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5340 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5341 if (op & 1 && op < 12) {
5342 tmp = neon_load_reg(rd, pass);
5344 /* The immediate value has already been inverted, so
5346 tcg_gen_andi_i32(tmp, tmp, imm);
5348 tcg_gen_ori_i32(tmp, tmp, imm);
5352 tmp = tcg_temp_new_i32();
5353 if (op == 14 && invert) {
5357 for (n = 0; n < 4; n++) {
5358 if (imm & (1 << (n + (pass & 1) * 4)))
5359 val |= 0xff << (n * 8);
5361 tcg_gen_movi_i32(tmp, val);
5363 tcg_gen_movi_i32(tmp, imm);
5366 neon_store_reg(rd, pass, tmp);
5369 } else { /* (insn & 0x00800010 == 0x00800000) */
5371 op = (insn >> 8) & 0xf;
5372 if ((insn & (1 << 6)) == 0) {
5373 /* Three registers of different lengths. */
5377 /* undefreq: bit 0 : UNDEF if size != 0
5378 * bit 1 : UNDEF if size == 0
5379 * bit 2 : UNDEF if U == 1
5380 * Note that [1:0] set implies 'always UNDEF'
5383 /* prewiden, src1_wide, src2_wide, undefreq */
5384 static const int neon_3reg_wide[16][4] = {
5385 {1, 0, 0, 0}, /* VADDL */
5386 {1, 1, 0, 0}, /* VADDW */
5387 {1, 0, 0, 0}, /* VSUBL */
5388 {1, 1, 0, 0}, /* VSUBW */
5389 {0, 1, 1, 0}, /* VADDHN */
5390 {0, 0, 0, 0}, /* VABAL */
5391 {0, 1, 1, 0}, /* VSUBHN */
5392 {0, 0, 0, 0}, /* VABDL */
5393 {0, 0, 0, 0}, /* VMLAL */
5394 {0, 0, 0, 6}, /* VQDMLAL */
5395 {0, 0, 0, 0}, /* VMLSL */
5396 {0, 0, 0, 6}, /* VQDMLSL */
5397 {0, 0, 0, 0}, /* Integer VMULL */
5398 {0, 0, 0, 2}, /* VQDMULL */
5399 {0, 0, 0, 5}, /* Polynomial VMULL */
5400 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5403 prewiden = neon_3reg_wide[op][0];
5404 src1_wide = neon_3reg_wide[op][1];
5405 src2_wide = neon_3reg_wide[op][2];
5406 undefreq = neon_3reg_wide[op][3];
5408 if (((undefreq & 1) && (size != 0)) ||
5409 ((undefreq & 2) && (size == 0)) ||
5410 ((undefreq & 4) && u)) {
5413 if ((src1_wide && (rn & 1)) ||
5414 (src2_wide && (rm & 1)) ||
5415 (!src2_wide && (rd & 1))) {
5419 /* Avoid overlapping operands. Wide source operands are
5420 always aligned so will never overlap with wide
5421 destinations in problematic ways. */
5422 if (rd == rm && !src2_wide) {
5423 tmp = neon_load_reg(rm, 1);
5424 neon_store_scratch(2, tmp);
5425 } else if (rd == rn && !src1_wide) {
5426 tmp = neon_load_reg(rn, 1);
5427 neon_store_scratch(2, tmp);
5430 for (pass = 0; pass < 2; pass++) {
5432 neon_load_reg64(cpu_V0, rn + pass);
5435 if (pass == 1 && rd == rn) {
5436 tmp = neon_load_scratch(2);
5438 tmp = neon_load_reg(rn, pass);
5441 gen_neon_widen(cpu_V0, tmp, size, u);
5445 neon_load_reg64(cpu_V1, rm + pass);
5448 if (pass == 1 && rd == rm) {
5449 tmp2 = neon_load_scratch(2);
5451 tmp2 = neon_load_reg(rm, pass);
5454 gen_neon_widen(cpu_V1, tmp2, size, u);
5458 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5459 gen_neon_addl(size);
5461 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5462 gen_neon_subl(size);
5464 case 5: case 7: /* VABAL, VABDL */
5465 switch ((size << 1) | u) {
5467 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5470 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5473 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5476 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5479 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5482 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5486 tcg_temp_free_i32(tmp2);
5487 tcg_temp_free_i32(tmp);
5489 case 8: case 9: case 10: case 11: case 12: case 13:
5490 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5491 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5493 case 14: /* Polynomial VMULL */
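/* Polynomial VMULL is a carry-less (GF(2)) multiply of 8-bit
 * elements producing 16-bit results. */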
5494 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5495 tcg_temp_free_i32(tmp2);
5496 tcg_temp_free_i32(tmp);
5498 default: /* 15 is RESERVED: caught earlier */
5503 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5504 neon_store_reg64(cpu_V0, rd + pass);
5505 } else if (op == 5 || (op >= 8 && op <= 11)) {
5507 neon_load_reg64(cpu_V1, rd + pass);
5509 case 10: /* VMLSL */
5510 gen_neon_negl(cpu_V0, size);
5512 case 5: case 8: /* VABAL, VMLAL */
5513 gen_neon_addl(size);
5515 case 9: case 11: /* VQDMLAL, VQDMLSL */
5516 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5518 gen_neon_negl(cpu_V0, size);
5520 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5525 neon_store_reg64(cpu_V0, rd + pass);
5526 } else if (op == 4 || op == 6) {
5527 /* Narrowing operation. */
5528 tmp = tcg_temp_new_i32();
5532 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5535 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5538 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5539 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5546 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5549 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5552 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5553 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5554 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5562 neon_store_reg(rd, 0, tmp3);
5563 neon_store_reg(rd, 1, tmp);
5566 /* Write back the result. */
5567 neon_store_reg64(cpu_V0, rd + pass);
5571 /* Two registers and a scalar. NB that for ops of this form
5572 * the ARM ARM labels bit 24 as Q, but it is in our variable
5579 case 1: /* Floating point VMLA scalar */
5580 case 5: /* Floating point VMLS scalar */
5581 case 9: /* Floating point VMUL scalar */
5586 case 0: /* Integer VMLA scalar */
5587 case 4: /* Integer VMLS scalar */
5588 case 8: /* Integer VMUL scalar */
5589 case 12: /* VQDMULH scalar */
5590 case 13: /* VQRDMULH scalar */
5591 if (u && ((rd | rn) & 1)) {
5594 tmp = neon_get_scalar(size, rm);
5595 neon_store_scratch(0, tmp);
5596 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5597 tmp = neon_load_scratch(0);
5598 tmp2 = neon_load_reg(rn, pass);
5601 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5603 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5605 } else if (op == 13) {
5607 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5609 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5611 } else if (op & 1) {
5612 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5613 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5614 tcg_temp_free_ptr(fpstatus);
5617 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5618 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5619 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5623 tcg_temp_free_i32(tmp2);
5626 tmp2 = neon_load_reg(rd, pass);
5629 gen_neon_add(size, tmp, tmp2);
5633 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5634 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5635 tcg_temp_free_ptr(fpstatus);
5639 gen_neon_rsb(size, tmp, tmp2);
5643 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5644 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5645 tcg_temp_free_ptr(fpstatus);
5651 tcg_temp_free_i32(tmp2);
5653 neon_store_reg(rd, pass, tmp);
5656 case 3: /* VQDMLAL scalar */
5657 case 7: /* VQDMLSL scalar */
5658 case 11: /* VQDMULL scalar */
5663 case 2: /* VMLAL scalar */
5664 case 6: /* VMLSL scalar */
5665 case 10: /* VMULL scalar */
5669 tmp2 = neon_get_scalar(size, rm);
5670 /* We need a copy of tmp2 because gen_neon_mull
5671 * deletes it during pass 0. */
5672 tmp4 = tcg_temp_new_i32();
5673 tcg_gen_mov_i32(tmp4, tmp2);
5674 tmp3 = neon_load_reg(rn, 1);
5676 for (pass = 0; pass < 2; pass++) {
5678 tmp = neon_load_reg(rn, 0);
5683 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5685 neon_load_reg64(cpu_V1, rd + pass);
5689 gen_neon_negl(cpu_V0, size);
5692 gen_neon_addl(size);
5695 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5697 gen_neon_negl(cpu_V0, size);
5699 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5705 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5710 neon_store_reg64(cpu_V0, rd + pass);
5715 default: /* 14 and 15 are RESERVED */
5719 } else { /* size == 3 */
5722 imm = (insn >> 8) & 0xf;
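/* VEXT: treat Vm:Vn as one double-width value (Vn in the least
 * significant bytes) and extract a (q ? 16 : 8)-byte result starting
 * at byte offset imm. */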
5727 if (q && ((rd | rn | rm) & 1)) {
5732 neon_load_reg64(cpu_V0, rn);
5734 neon_load_reg64(cpu_V1, rn + 1);
5736 } else if (imm == 8) {
5737 neon_load_reg64(cpu_V0, rn + 1);
5739 neon_load_reg64(cpu_V1, rm);
5742 tmp64 = tcg_temp_new_i64();
5744 neon_load_reg64(cpu_V0, rn);
5745 neon_load_reg64(tmp64, rn + 1);
5747 neon_load_reg64(cpu_V0, rn + 1);
5748 neon_load_reg64(tmp64, rm);
5750 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5751 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5752 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5754 neon_load_reg64(cpu_V1, rm);
5756 neon_load_reg64(cpu_V1, rm + 1);
5759 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5760 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5761 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5762 tcg_temp_free_i64(tmp64);
5765 neon_load_reg64(cpu_V0, rn);
5766 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5767 neon_load_reg64(cpu_V1, rm);
5768 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5769 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5771 neon_store_reg64(cpu_V0, rd);
5773 neon_store_reg64(cpu_V1, rd + 1);
5775 } else if ((insn & (1 << 11)) == 0) {
5776 /* Two register misc. */
5777 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5778 size = (insn >> 18) & 3;
5779 /* UNDEF for unknown op values and bad op-size combinations */
5780 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5783 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5784 q && ((rm | rd) & 1)) {
5788 case NEON_2RM_VREV64:
5789 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5790 tmp = neon_load_reg(rm, pass * 2);
5791 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5793 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5794 case 1: gen_swap_half(tmp); break;
5795 case 2: /* no-op */ break;
5798 neon_store_reg(rd, pass * 2 + 1, tmp);
5800 neon_store_reg(rd, pass * 2, tmp2);
5803 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5804 case 1: gen_swap_half(tmp2); break;
5807 neon_store_reg(rd, pass * 2, tmp2);
5811 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5812 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
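/* VPADDL/VPADAL: add adjacent pairs of source elements to form
 * double-width sums; VPADAL additionally accumulates the sums into
 * the destination. The _U cases (op & 1) treat the elements as
 * unsigned. */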
5813 for (pass = 0; pass < q + 1; pass++) {
5814 tmp = neon_load_reg(rm, pass * 2);
5815 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5816 tmp = neon_load_reg(rm, pass * 2 + 1);
5817 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5819 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5820 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5821 case 2: tcg_gen_add_i64(CPU_V001); break;
5824 if (op >= NEON_2RM_VPADAL) {
5826 neon_load_reg64(cpu_V1, rd + pass);
5827 gen_neon_addl(size);
5829 neon_store_reg64(cpu_V0, rd + pass);
5835 for (n = 0; n < (q ? 4 : 2); n += 2) {
5836 tmp = neon_load_reg(rm, n);
5837 tmp2 = neon_load_reg(rd, n + 1);
5838 neon_store_reg(rm, n, tmp2);
5839 neon_store_reg(rd, n + 1, tmp);
5846 if (gen_neon_unzip(rd, rm, size, q)) {
5851 if (gen_neon_zip(rd, rm, size, q)) {
5855 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5856 /* also VQMOVUN; op field and mnemonics don't line up */
5861 for (pass = 0; pass < 2; pass++) {
5862 neon_load_reg64(cpu_V0, rm + pass);
5863 tmp = tcg_temp_new_i32();
5864 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5869 neon_store_reg(rd, 0, tmp2);
5870 neon_store_reg(rd, 1, tmp);
5874 case NEON_2RM_VSHLL:
5875 if (q || (rd & 1)) {
5878 tmp = neon_load_reg(rm, 0);
5879 tmp2 = neon_load_reg(rm, 1);
5880 for (pass = 0; pass < 2; pass++) {
5883 gen_neon_widen(cpu_V0, tmp, size, 1);
5884 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5885 neon_store_reg64(cpu_V0, rd + pass);
5888 case NEON_2RM_VCVT_F16_F32:
5889 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5893 tmp = tcg_temp_new_i32();
5894 tmp2 = tcg_temp_new_i32();
5895 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5896 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5897 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5898 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5899 tcg_gen_shli_i32(tmp2, tmp2, 16);
5900 tcg_gen_or_i32(tmp2, tmp2, tmp);
5901 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5902 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5903 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5904 neon_store_reg(rd, 0, tmp2);
5905 tmp2 = tcg_temp_new_i32();
5906 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5907 tcg_gen_shli_i32(tmp2, tmp2, 16);
5908 tcg_gen_or_i32(tmp2, tmp2, tmp);
5909 neon_store_reg(rd, 1, tmp2);
5910 tcg_temp_free_i32(tmp);
5912 case NEON_2RM_VCVT_F32_F16:
5913 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5917 tmp3 = tcg_temp_new_i32();
5918 tmp = neon_load_reg(rm, 0);
5919 tmp2 = neon_load_reg(rm, 1);
5920 tcg_gen_ext16u_i32(tmp3, tmp);
5921 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5922 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5923 tcg_gen_shri_i32(tmp3, tmp, 16);
5924 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5925 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5926 tcg_temp_free_i32(tmp);
5927 tcg_gen_ext16u_i32(tmp3, tmp2);
5928 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5929 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5930 tcg_gen_shri_i32(tmp3, tmp2, 16);
5931 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5932 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5933 tcg_temp_free_i32(tmp2);
5934 tcg_temp_free_i32(tmp3);
5938 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5939 if (neon_2rm_is_float_op(op)) {
5940 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5941 neon_reg_offset(rm, pass));
5944 tmp = neon_load_reg(rm, pass);
5947 case NEON_2RM_VREV32:
5949 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5950 case 1: gen_swap_half(tmp); break;
5954 case NEON_2RM_VREV16:
5959 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5960 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5961 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5967 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5968 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5969 case 2: gen_helper_clz(tmp, tmp); break;
5974 gen_helper_neon_cnt_u8(tmp, tmp);
5977 tcg_gen_not_i32(tmp, tmp);
5979 case NEON_2RM_VQABS:
5982 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5985 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5988 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5993 case NEON_2RM_VQNEG:
5996 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5999 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6002 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6007 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6008 tmp2 = tcg_const_i32(0);
6010 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6011 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6012 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6015 tcg_temp_free(tmp2);
6016 if (op == NEON_2RM_VCLE0) {
6017 tcg_gen_not_i32(tmp, tmp);
6020 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6021 tmp2 = tcg_const_i32(0);
6023 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6024 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6025 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6028 tcg_temp_free(tmp2);
6029 if (op == NEON_2RM_VCLT0) {
6030 tcg_gen_not_i32(tmp, tmp);
6033 case NEON_2RM_VCEQ0:
6034 tmp2 = tcg_const_i32(0);
6036 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6037 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6038 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6041 tcg_temp_free(tmp2);
6045 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6046 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6047 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6052 tmp2 = tcg_const_i32(0);
6053 gen_neon_rsb(size, tmp, tmp2);
6054 tcg_temp_free(tmp2);
6056 case NEON_2RM_VCGT0_F:
6058 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6059 tmp2 = tcg_const_i32(0);
6060 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6061 tcg_temp_free(tmp2);
6062 tcg_temp_free_ptr(fpstatus);
6065 case NEON_2RM_VCGE0_F:
6067 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6068 tmp2 = tcg_const_i32(0);
6069 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6070 tcg_temp_free(tmp2);
6071 tcg_temp_free_ptr(fpstatus);
6074 case NEON_2RM_VCEQ0_F:
6076 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6077 tmp2 = tcg_const_i32(0);
6078 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6079 tcg_temp_free(tmp2);
6080 tcg_temp_free_ptr(fpstatus);
6083 case NEON_2RM_VCLE0_F:
6085 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6086 tmp2 = tcg_const_i32(0);
6087 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6088 tcg_temp_free(tmp2);
6089 tcg_temp_free_ptr(fpstatus);
6092 case NEON_2RM_VCLT0_F:
6094 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6095 tmp2 = tcg_const_i32(0);
6096 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6097 tcg_temp_free(tmp2);
6098 tcg_temp_free_ptr(fpstatus);
6101 case NEON_2RM_VABS_F:
6104 case NEON_2RM_VNEG_F:
6108 tmp2 = neon_load_reg(rd, pass);
6109 neon_store_reg(rm, pass, tmp2);
6112 tmp2 = neon_load_reg(rd, pass);
6114 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6115 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6118 neon_store_reg(rm, pass, tmp2);
6120 case NEON_2RM_VRECPE:
6121 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6123 case NEON_2RM_VRSQRTE:
6124 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6126 case NEON_2RM_VRECPE_F:
6127 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6129 case NEON_2RM_VRSQRTE_F:
6130 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6132 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6135 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6138 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6139 gen_vfp_tosiz(0, 1);
6141 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6142 gen_vfp_touiz(0, 1);
6145 /* Reserved op values were caught by the
6146 * neon_2rm_sizes[] check earlier.
6150 if (neon_2rm_is_float_op(op)) {
6151 tcg_gen_st_f32(cpu_F0s, cpu_env,
6152 neon_reg_offset(rd, pass));
6154 neon_store_reg(rd, pass, tmp);
6159 } else if ((insn & (1 << 10)) == 0) {
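/* VTBL/VTBX: table lookup through n consecutive D registers starting
 * at rn; bit 6 selects VTBX, where out-of-range indexes leave the
 * destination byte unchanged, rather than VTBL, where they yield 0. */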
6161 int n = ((insn >> 8) & 3) + 1;
6162 if ((rn + n) > 32) {
6163 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6164 * helper function running off the end of the register file.
6169 if (insn & (1 << 6)) {
6170 tmp = neon_load_reg(rd, 0);
6172 tmp = tcg_temp_new_i32();
6173 tcg_gen_movi_i32(tmp, 0);
6175 tmp2 = neon_load_reg(rm, 0);
6176 tmp4 = tcg_const_i32(rn);
6177 tmp5 = tcg_const_i32(n);
6178 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
6179 tcg_temp_free_i32(tmp);
6180 if (insn & (1 << 6)) {
6181 tmp = neon_load_reg(rd, 1);
6183 tmp = tcg_temp_new_i32();
6184 tcg_gen_movi_i32(tmp, 0);
6186 tmp3 = neon_load_reg(rm, 1);
6187 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
6188 tcg_temp_free_i32(tmp5);
6189 tcg_temp_free_i32(tmp4);
6190 neon_store_reg(rd, 0, tmp2);
6191 neon_store_reg(rd, 1, tmp3);
6192 tcg_temp_free_i32(tmp);
6193 } else if ((insn & 0x380) == 0) {
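/* VDUP (scalar): replicate one element of Dm across the whole
 * destination; bits 19:16 encode the element size and index. */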
6195 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6198 if (insn & (1 << 19)) {
6199 tmp = neon_load_reg(rm, 1);
6201 tmp = neon_load_reg(rm, 0);
6203 if (insn & (1 << 16)) {
6204 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6205 } else if (insn & (1 << 17)) {
6206 if ((insn >> 18) & 1)
6207 gen_neon_dup_high16(tmp);
6209 gen_neon_dup_low16(tmp);
6211 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6212 tmp2 = tcg_temp_new_i32();
6213 tcg_gen_mov_i32(tmp2, tmp);
6214 neon_store_reg(rd, pass, tmp2);
6216 tcg_temp_free_i32(tmp);
6225 static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
6227 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6228 const ARMCPRegInfo *ri;
6229 ARMCPU *cpu = arm_env_get_cpu(env);
6231 cpnum = (insn >> 8) & 0xf;
6232 if (arm_feature(env, ARM_FEATURE_XSCALE)
6233 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6236 /* First check for coprocessor space used for actual instructions */
6240 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6241 return disas_iwmmxt_insn(env, s, insn);
6242 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6243 return disas_dsp_insn(env, s, insn);
6248 return disas_vfp_insn (env, s, insn);
6253 /* Otherwise treat as a generic register access */
6254 is64 = (insn & (1 << 25)) == 0;
6255 if (!is64 && ((insn & (1 << 4)) == 0)) {
6263 opc1 = (insn >> 4) & 0xf;
6265 rt2 = (insn >> 16) & 0xf;
6267 crn = (insn >> 16) & 0xf;
6268 opc1 = (insn >> 21) & 7;
6269 opc2 = (insn >> 5) & 7;
6272 isread = (insn >> 20) & 1;
6273 rt = (insn >> 12) & 0xf;
6275 ri = get_arm_cp_reginfo(cpu,
6276 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6278 /* Check access permissions */
6279 if (!cp_access_ok(env, ri, isread)) {
6283 /* Handle special cases first */
6284 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6291 gen_set_pc_im(s->pc);
6292 s->is_jmp = DISAS_WFI;
6303 if (ri->type & ARM_CP_CONST) {
6304 tmp64 = tcg_const_i64(ri->resetvalue);
6305 } else if (ri->readfn) {
6307 gen_set_pc_im(s->pc);
6308 tmp64 = tcg_temp_new_i64();
6309 tmpptr = tcg_const_ptr(ri);
6310 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6311 tcg_temp_free_ptr(tmpptr);
6313 tmp64 = tcg_temp_new_i64();
6314 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6316 tmp = tcg_temp_new_i32();
6317 tcg_gen_trunc_i64_i32(tmp, tmp64);
6318 store_reg(s, rt, tmp);
6319 tcg_gen_shri_i64(tmp64, tmp64, 32);
6320 tcg_gen_trunc_i64_i32(tmp, tmp64);
6321 store_reg(s, rt2, tmp);
6324 if (ri->type & ARM_CP_CONST) {
6325 tmp = tcg_const_i32(ri->resetvalue);
6326 } else if (ri->readfn) {
6328 gen_set_pc_im(s->pc);
6329 tmp = tcg_temp_new_i32();
6330 tmpptr = tcg_const_ptr(ri);
6331 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6332 tcg_temp_free_ptr(tmpptr);
6334 tmp = load_cpu_offset(ri->fieldoffset);
6337 /* Destination register of r15 for 32 bit loads sets
6338 * the condition codes from the high 4 bits of the value
6341 tcg_temp_free_i32(tmp);
6343 store_reg(s, rt, tmp);
6348 if (ri->type & ARM_CP_CONST) {
6349 /* If not forbidden by access permissions, treat as WI */
6355 TCGv_i64 tmp64 = tcg_temp_new_i64();
6356 tmplo = load_reg(s, rt);
6357 tmphi = load_reg(s, rt2);
6358 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6359 tcg_temp_free_i32(tmplo);
6360 tcg_temp_free_i32(tmphi);
6362 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6363 gen_set_pc_im(s->pc);
6364 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6365 tcg_temp_free_ptr(tmpptr);
6367 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6369 tcg_temp_free_i64(tmp64);
6374 gen_set_pc_im(s->pc);
6375 tmp = load_reg(s, rt);
6376 tmpptr = tcg_const_ptr(ri);
6377 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6378 tcg_temp_free_ptr(tmpptr);
6379 tcg_temp_free_i32(tmp);
6381 TCGv tmp = load_reg(s, rt);
6382 store_cpu_offset(tmp, ri->fieldoffset);
6385 /* We default to ending the TB on a coprocessor register write,
6386 * but allow this to be suppressed by the register definition
6387 * (usually only necessary to work around guest bugs).
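* Ending the TB here forces a return to the main execution loop, so
* any system-state change made by the write (e.g. MMU or interrupt
* configuration) is observed before further translated code runs.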
6389 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6396 /* Fallback code: handle coprocessor registers not yet converted
6401 return disas_cp15_insn (env, s, insn);
6408 /* Store a 64-bit value to a register pair. Clobbers val. */
6409 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6412 tmp = tcg_temp_new_i32();
6413 tcg_gen_trunc_i64_i32(tmp, val);
6414 store_reg(s, rlow, tmp);
6415 tmp = tcg_temp_new_i32();
6416 tcg_gen_shri_i64(val, val, 32);
6417 tcg_gen_trunc_i64_i32(tmp, val);
6418 store_reg(s, rhigh, tmp);
6421 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6422 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6427 /* Load value and extend to 64 bits. */
6428 tmp = tcg_temp_new_i64();
6429 tmp2 = load_reg(s, rlow);
6430 tcg_gen_extu_i32_i64(tmp, tmp2);
6431 tcg_temp_free_i32(tmp2);
6432 tcg_gen_add_i64(val, val, tmp);
6433 tcg_temp_free_i64(tmp);
6436 /* load and add a 64-bit value from a register pair. */
6437 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6443 /* Load 64-bit value rd:rn. */
6444 tmpl = load_reg(s, rlow);
6445 tmph = load_reg(s, rhigh);
6446 tmp = tcg_temp_new_i64();
6447 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6448 tcg_temp_free_i32(tmpl);
6449 tcg_temp_free_i32(tmph);
6450 tcg_gen_add_i64(val, val, tmp);
6451 tcg_temp_free_i64(tmp);
6454 /* Set N and Z flags from a 64-bit value. */
6455 static void gen_logicq_cc(TCGv_i64 val)
6457 TCGv tmp = tcg_temp_new_i32();
6458 gen_helper_logicq_cc(tmp, val);
6460 tcg_temp_free_i32(tmp);
6463 /* Load/Store exclusive instructions are implemented by remembering
6464 the value/address loaded, and seeing if these are the same
6465 when the store is performed. This should be sufficient to implement
6466 the architecturally mandated semantics, and avoids having to monitor
6469 In system emulation mode only one CPU will be running at once, so
6470 this sequence is effectively atomic. In user emulation mode we
6471 throw an exception and handle the atomic operation elsewhere. */
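/* Illustrative guest sequence (not from this file): a typical atomic
 * increment is
 *   retry: LDREX r1, [r0]
 *          ADD   r1, r1, #1
 *          STREX r2, r1, [r0]
 *          CMP   r2, #0
 *          BNE   retry
 * gen_load_exclusive() below records the address and value for the
 * LDREX; gen_store_exclusive() emits the compare-and-store that
 * implements the STREX. */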
6472 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6473 TCGv addr, int size)
6479 tmp = gen_ld8u(addr, IS_USER(s));
6482 tmp = gen_ld16u(addr, IS_USER(s));
6486 tmp = gen_ld32(addr, IS_USER(s));
6491 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6492 store_reg(s, rt, tmp);
6494 TCGv tmp2 = tcg_temp_new_i32();
6495 tcg_gen_addi_i32(tmp2, addr, 4);
6496 tmp = gen_ld32(tmp2, IS_USER(s));
6497 tcg_temp_free_i32(tmp2);
6498 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6499 store_reg(s, rt2, tmp);
6501 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6504 static void gen_clrex(DisasContext *s)
6506 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6509 #ifdef CONFIG_USER_ONLY
6510 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6511 TCGv addr, int size)
6513 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6514 tcg_gen_movi_i32(cpu_exclusive_info,
6515 size | (rd << 4) | (rt << 8) | (rt2 << 12));
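/* The value just stored packs the operand details so the user-mode
 * EXCP_STREX handler can redo the access: bits [3:0] = size,
 * [7:4] = rd, [11:8] = rt, [15:12] = rt2. */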
6516 gen_exception_insn(s, 4, EXCP_STREX);
6519 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6520 TCGv addr, int size)
6526 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6532 fail_label = gen_new_label();
6533 done_label = gen_new_label();
6534 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6537 tmp = gen_ld8u(addr, IS_USER(s));
6540 tmp = gen_ld16u(addr, IS_USER(s));
6544 tmp = gen_ld32(addr, IS_USER(s));
6549 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6550 tcg_temp_free_i32(tmp);
6552 TCGv tmp2 = tcg_temp_new_i32();
6553 tcg_gen_addi_i32(tmp2, addr, 4);
6554 tmp = gen_ld32(tmp2, IS_USER(s));
6555 tcg_temp_free_i32(tmp2);
6556 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6557 tcg_temp_free_i32(tmp);
6559 tmp = load_reg(s, rt);
6562 gen_st8(tmp, addr, IS_USER(s));
6565 gen_st16(tmp, addr, IS_USER(s));
6569 gen_st32(tmp, addr, IS_USER(s));
6575 tcg_gen_addi_i32(addr, addr, 4);
6576 tmp = load_reg(s, rt2);
6577 gen_st32(tmp, addr, IS_USER(s));
6579 tcg_gen_movi_i32(cpu_R[rd], 0);
6580 tcg_gen_br(done_label);
6581 gen_set_label(fail_label);
6582 tcg_gen_movi_i32(cpu_R[rd], 1);
6583 gen_set_label(done_label);
6584 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6588 static void disas_arm_insn(CPUARMState * env, DisasContext *s)
6590 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6597 insn = arm_ldl_code(s->pc, s->bswap_code);
6600 /* M variants do not implement ARM mode. */
6605 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6606 * choose to UNDEF. In ARMv5 and above the space is used
6607 * for miscellaneous unconditional instructions.
6611 /* Unconditional instructions. */
6612 if (((insn >> 25) & 7) == 1) {
6613 /* NEON Data processing. */
6614 if (!arm_feature(env, ARM_FEATURE_NEON))
6617 if (disas_neon_data_insn(env, s, insn))
6621 if ((insn & 0x0f100000) == 0x04000000) {
6622 /* NEON load/store. */
6623 if (!arm_feature(env, ARM_FEATURE_NEON))
6626 if (disas_neon_ls_insn(env, s, insn))
6630 if (((insn & 0x0f30f000) == 0x0510f000) ||
6631 ((insn & 0x0f30f010) == 0x0710f000)) {
6632 if ((insn & (1 << 22)) == 0) {
6634 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6638 /* Otherwise PLD; v5TE+ */
6642 if (((insn & 0x0f70f000) == 0x0450f000) ||
6643 ((insn & 0x0f70f010) == 0x0650f000)) {
6645 return; /* PLI; V7 */
6647 if (((insn & 0x0f700000) == 0x04100000) ||
6648 ((insn & 0x0f700010) == 0x06100000)) {
6649 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6652 return; /* v7MP: Unallocated memory hint: must NOP */
6655 if ((insn & 0x0ffffdff) == 0x01010000) {
6658 if (((insn >> 9) & 1) != s->bswap_code) {
6659 /* Dynamic endianness switching not implemented. */
6663 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6664 switch ((insn >> 4) & 0xf) {
6673 /* We don't emulate caches so these are no-ops. */
6678 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6684 op1 = (insn & 0x1f);
6685 addr = tcg_temp_new_i32();
6686 tmp = tcg_const_i32(op1);
6687 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6688 tcg_temp_free_i32(tmp);
6689 i = (insn >> 23) & 3;
6691 case 0: offset = -4; break; /* DA */
6692 case 1: offset = 0; break; /* IA */
6693 case 2: offset = -8; break; /* DB */
6694 case 3: offset = 4; break; /* IB */
6698 tcg_gen_addi_i32(addr, addr, offset);
6699 tmp = load_reg(s, 14);
6700 gen_st32(tmp, addr, 0);
6701 tmp = load_cpu_field(spsr);
6702 tcg_gen_addi_i32(addr, addr, 4);
6703 gen_st32(tmp, addr, 0);
6704 if (insn & (1 << 21)) {
6705 /* Base writeback. */
6707 case 0: offset = -8; break;
6708 case 1: offset = 4; break;
6709 case 2: offset = -4; break;
6710 case 3: offset = 0; break;
6714 tcg_gen_addi_i32(addr, addr, offset);
6715 tmp = tcg_const_i32(op1);
6716 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6717 tcg_temp_free_i32(tmp);
6718 tcg_temp_free_i32(addr);
6720 tcg_temp_free_i32(addr);
6723 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6729 rn = (insn >> 16) & 0xf;
6730 addr = load_reg(s, rn);
6731 i = (insn >> 23) & 3;
6733 case 0: offset = -4; break; /* DA */
6734 case 1: offset = 0; break; /* IA */
6735 case 2: offset = -8; break; /* DB */
6736 case 3: offset = 4; break; /* IB */
6740 tcg_gen_addi_i32(addr, addr, offset);
6741 /* Load PC into tmp and CPSR into tmp2. */
6742 tmp = gen_ld32(addr, 0);
6743 tcg_gen_addi_i32(addr, addr, 4);
6744 tmp2 = gen_ld32(addr, 0);
6745 if (insn & (1 << 21)) {
6746 /* Base writeback. */
6748 case 0: offset = -8; break;
6749 case 1: offset = 4; break;
6750 case 2: offset = -4; break;
6751 case 3: offset = 0; break;
6755 tcg_gen_addi_i32(addr, addr, offset);
6756 store_reg(s, rn, addr);
6758 tcg_temp_free_i32(addr);
6760 gen_rfe(s, tmp, tmp2);
6762 } else if ((insn & 0x0e000000) == 0x0a000000) {
6763 /* branch link and change to thumb (blx <offset>) */
6766 val = (uint32_t)s->pc;
6767 tmp = tcg_temp_new_i32();
6768 tcg_gen_movi_i32(tmp, val);
6769 store_reg(s, 14, tmp);
6770 /* Sign-extend the 24-bit offset */
6771 offset = (((int32_t)insn) << 8) >> 8;
6772 /* offset * 4 + bit24 * 2 + (thumb bit) */
6773 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6774 /* pipeline offset */
6776 /* protected by ARCH(5); above, near the start of uncond block */
6779 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6780 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6781 /* iWMMXt register transfer. */
6782 if (env->cp15.c15_cpar & (1 << 1))
6783 if (!disas_iwmmxt_insn(env, s, insn))
6786 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6787 /* Coprocessor double register transfer. */
6789 } else if ((insn & 0x0f000010) == 0x0e000010) {
6790 /* Additional coprocessor register transfer. */
6791 } else if ((insn & 0x0ff10020) == 0x01000000) {
6794 /* cps (privileged) */
6798 if (insn & (1 << 19)) {
6799 if (insn & (1 << 8))
6801 if (insn & (1 << 7))
6803 if (insn & (1 << 6))
6805 if (insn & (1 << 18))
6808 if (insn & (1 << 17)) {
6810 val |= (insn & 0x1f);
6813 gen_set_psr_im(s, mask, 0, val);
6820 /* if not always execute, we generate a conditional jump to
6822 s->condlabel = gen_new_label();
6823 gen_test_cc(cond ^ 1, s->condlabel);
6826 if ((insn & 0x0f900000) == 0x03000000) {
6827 if ((insn & (1 << 21)) == 0) {
6829 rd = (insn >> 12) & 0xf;
6830 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
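/* MOVW/MOVT: the 16-bit immediate is imm4:imm12 from bits [19:16]
 * and [11:0]; bit 22 below distinguishes MOVT (replace the top
 * halfword, keep the bottom) from MOVW (load the value directly). */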
6831 if ((insn & (1 << 22)) == 0) {
6833 tmp = tcg_temp_new_i32();
6834 tcg_gen_movi_i32(tmp, val);
6837 tmp = load_reg(s, rd);
6838 tcg_gen_ext16u_i32(tmp, tmp);
6839 tcg_gen_ori_i32(tmp, tmp, val << 16);
6841 store_reg(s, rd, tmp);
6843 if (((insn >> 12) & 0xf) != 0xf)
6845 if (((insn >> 16) & 0xf) == 0) {
6846 gen_nop_hint(s, insn & 0xff);
6848 /* CPSR = immediate */
6850 shift = ((insn >> 8) & 0xf) * 2;
6852 val = (val >> shift) | (val << (32 - shift));
6853 i = ((insn & (1 << 22)) != 0);
6854 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6858 } else if ((insn & 0x0f900000) == 0x01000000
6859 && (insn & 0x00000090) != 0x00000090) {
6860 /* miscellaneous instructions */
6861 op1 = (insn >> 21) & 3;
6862 sh = (insn >> 4) & 0xf;
6865 case 0x0: /* move program status register */
6868 tmp = load_reg(s, rm);
6869 i = ((op1 & 2) != 0);
6870 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6874 rd = (insn >> 12) & 0xf;
6878 tmp = load_cpu_field(spsr);
6880 tmp = tcg_temp_new_i32();
6881 gen_helper_cpsr_read(tmp);
6883 store_reg(s, rd, tmp);
6888 /* branch/exchange thumb (bx). */
6890 tmp = load_reg(s, rm);
6892 } else if (op1 == 3) {
6895 rd = (insn >> 12) & 0xf;
6896 tmp = load_reg(s, rm);
6897 gen_helper_clz(tmp, tmp);
6898 store_reg(s, rd, tmp);
6906 /* Trivial implementation equivalent to bx. */
6907 tmp = load_reg(s, rm);
6918 /* branch link/exchange thumb (blx) */
6919 tmp = load_reg(s, rm);
6920 tmp2 = tcg_temp_new_i32();
6921 tcg_gen_movi_i32(tmp2, s->pc);
6922 store_reg(s, 14, tmp2);
6925 case 0x5: /* saturating add/subtract */
6927 rd = (insn >> 12) & 0xf;
6928 rn = (insn >> 16) & 0xf;
6929 tmp = load_reg(s, rm);
6930 tmp2 = load_reg(s, rn);
6932 gen_helper_double_saturate(tmp2, tmp2);
6934 gen_helper_sub_saturate(tmp, tmp, tmp2);
6936 gen_helper_add_saturate(tmp, tmp, tmp2);
6937 tcg_temp_free_i32(tmp2);
6938 store_reg(s, rd, tmp);
6941 /* SMC instruction (op1 == 3)
6942 and undefined instructions (op1 == 0 || op1 == 2)
6949 gen_exception_insn(s, 4, EXCP_BKPT);
6951 case 0x8: /* signed multiply */
6956 rs = (insn >> 8) & 0xf;
6957 rn = (insn >> 12) & 0xf;
6958 rd = (insn >> 16) & 0xf;
6960 /* (32 * 16) >> 16 */
6961 tmp = load_reg(s, rm);
6962 tmp2 = load_reg(s, rs);
6964 tcg_gen_sari_i32(tmp2, tmp2, 16);
6967 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6968 tcg_gen_shri_i64(tmp64, tmp64, 16);
6969 tmp = tcg_temp_new_i32();
6970 tcg_gen_trunc_i64_i32(tmp, tmp64);
6971 tcg_temp_free_i64(tmp64);
6972 if ((sh & 2) == 0) {
6973 tmp2 = load_reg(s, rn);
6974 gen_helper_add_setq(tmp, tmp, tmp2);
6975 tcg_temp_free_i32(tmp2);
6977 store_reg(s, rd, tmp);
6980 tmp = load_reg(s, rm);
6981 tmp2 = load_reg(s, rs);
6982 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6983 tcg_temp_free_i32(tmp2);
6985 tmp64 = tcg_temp_new_i64();
6986 tcg_gen_ext_i32_i64(tmp64, tmp);
6987 tcg_temp_free_i32(tmp);
6988 gen_addq(s, tmp64, rn, rd);
6989 gen_storeq_reg(s, rn, rd, tmp64);
6990 tcg_temp_free_i64(tmp64);
6993 tmp2 = load_reg(s, rn);
6994 gen_helper_add_setq(tmp, tmp, tmp2);
6995 tcg_temp_free_i32(tmp2);
6997 store_reg(s, rd, tmp);
7004 } else if (((insn & 0x0e000000) == 0 &&
7005 (insn & 0x00000090) != 0x90) ||
7006 ((insn & 0x0e000000) == (1 << 25))) {
7007 int set_cc, logic_cc, shiftop;
7009 op1 = (insn >> 21) & 0xf;
7010 set_cc = (insn >> 20) & 1;
7011 logic_cc = table_logic_cc[op1] & set_cc;
7013 /* data processing instruction */
7014 if (insn & (1 << 25)) {
7015 /* immediate operand */
7017 shift = ((insn >> 8) & 0xf) * 2;
7019 val = (val >> shift) | (val << (32 - shift));
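/* val now holds the ARM modified immediate: an 8-bit value rotated
 * right by twice the 4-bit rotate field, e.g. 0x01 with rotate 1
 * gives 0x40000000. */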
7021 tmp2 = tcg_temp_new_i32();
7022 tcg_gen_movi_i32(tmp2, val);
7023 if (logic_cc && shift) {
7024 gen_set_CF_bit31(tmp2);
7029 tmp2 = load_reg(s, rm);
7030 shiftop = (insn >> 5) & 3;
7031 if (!(insn & (1 << 4))) {
7032 shift = (insn >> 7) & 0x1f;
7033 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7035 rs = (insn >> 8) & 0xf;
7036 tmp = load_reg(s, rs);
7037 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7040 if (op1 != 0x0f && op1 != 0x0d) {
7041 rn = (insn >> 16) & 0xf;
7042 tmp = load_reg(s, rn);
7046 rd = (insn >> 12) & 0xf;
7049 tcg_gen_and_i32(tmp, tmp, tmp2);
7053 store_reg_bx(env, s, rd, tmp);
7056 tcg_gen_xor_i32(tmp, tmp, tmp2);
7060 store_reg_bx(env, s, rd, tmp);
7063 if (set_cc && rd == 15) {
7064 /* SUBS r15, ... is used for exception return. */
7068 gen_helper_sub_cc(tmp, tmp, tmp2);
7069 gen_exception_return(s, tmp);
7072 gen_helper_sub_cc(tmp, tmp, tmp2);
7074 tcg_gen_sub_i32(tmp, tmp, tmp2);
7076 store_reg_bx(env, s, rd, tmp);
7081 gen_helper_sub_cc(tmp, tmp2, tmp);
7083 tcg_gen_sub_i32(tmp, tmp2, tmp);
7085 store_reg_bx(env, s, rd, tmp);
7089 gen_helper_add_cc(tmp, tmp, tmp2);
7091 tcg_gen_add_i32(tmp, tmp, tmp2);
7093 store_reg_bx(env, s, rd, tmp);
7097 gen_helper_adc_cc(tmp, tmp, tmp2);
7099 gen_add_carry(tmp, tmp, tmp2);
7101 store_reg_bx(env, s, rd, tmp);
7105 gen_helper_sbc_cc(tmp, tmp, tmp2);
7107 gen_sub_carry(tmp, tmp, tmp2);
7109 store_reg_bx(env, s, rd, tmp);
7113 gen_helper_sbc_cc(tmp, tmp2, tmp);
7115 gen_sub_carry(tmp, tmp2, tmp);
7117 store_reg_bx(env, s, rd, tmp);
7121 tcg_gen_and_i32(tmp, tmp, tmp2);
7124 tcg_temp_free_i32(tmp);
7128 tcg_gen_xor_i32(tmp, tmp, tmp2);
7131 tcg_temp_free_i32(tmp);
7135 gen_helper_sub_cc(tmp, tmp, tmp2);
7137 tcg_temp_free_i32(tmp);
7141 gen_helper_add_cc(tmp, tmp, tmp2);
7143 tcg_temp_free_i32(tmp);
7146 tcg_gen_or_i32(tmp, tmp, tmp2);
7150 store_reg_bx(env, s, rd, tmp);
7153 if (logic_cc && rd == 15) {
7154 /* MOVS r15, ... is used for exception return. */
7158 gen_exception_return(s, tmp2);
7163 store_reg_bx(env, s, rd, tmp2);
7167 tcg_gen_andc_i32(tmp, tmp, tmp2);
7171 store_reg_bx(env, s, rd, tmp);
7175 tcg_gen_not_i32(tmp2, tmp2);
7179 store_reg_bx(env, s, rd, tmp2);
7182 if (op1 != 0x0f && op1 != 0x0d) {
7183 tcg_temp_free_i32(tmp2);
7186 /* other instructions */
7187 op1 = (insn >> 24) & 0xf;
7191 /* multiplies, extra load/stores */
7192 sh = (insn >> 5) & 3;
7195 rd = (insn >> 16) & 0xf;
7196 rn = (insn >> 12) & 0xf;
7197 rs = (insn >> 8) & 0xf;
7199 op1 = (insn >> 20) & 0xf;
7201 case 0: case 1: case 2: case 3: case 6:
7203 tmp = load_reg(s, rs);
7204 tmp2 = load_reg(s, rm);
7205 tcg_gen_mul_i32(tmp, tmp, tmp2);
7206 tcg_temp_free_i32(tmp2);
7207 if (insn & (1 << 22)) {
7208 /* Subtract (mls) */
7210 tmp2 = load_reg(s, rn);
7211 tcg_gen_sub_i32(tmp, tmp2, tmp);
7212 tcg_temp_free_i32(tmp2);
7213 } else if (insn & (1 << 21)) {
7215 tmp2 = load_reg(s, rn);
7216 tcg_gen_add_i32(tmp, tmp, tmp2);
7217 tcg_temp_free_i32(tmp2);
7219 if (insn & (1 << 20))
7221 store_reg(s, rd, tmp);
7224 /* 64 bit mul double accumulate (UMAAL) */
7226 tmp = load_reg(s, rs);
7227 tmp2 = load_reg(s, rm);
7228 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7229 gen_addq_lo(s, tmp64, rn);
7230 gen_addq_lo(s, tmp64, rd);
7231 gen_storeq_reg(s, rn, rd, tmp64);
7232 tcg_temp_free_i64(tmp64);
7234 case 8: case 9: case 10: case 11:
7235 case 12: case 13: case 14: case 15:
7236 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7237 tmp = load_reg(s, rs);
7238 tmp2 = load_reg(s, rm);
7239 if (insn & (1 << 22)) {
7240 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7242 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7244 if (insn & (1 << 21)) { /* mult accumulate */
7245 gen_addq(s, tmp64, rn, rd);
7247 if (insn & (1 << 20)) {
7248 gen_logicq_cc(tmp64);
7250 gen_storeq_reg(s, rn, rd, tmp64);
7251 tcg_temp_free_i64(tmp64);
7257 rn = (insn >> 16) & 0xf;
7258 rd = (insn >> 12) & 0xf;
7259 if (insn & (1 << 23)) {
7260 /* load/store exclusive */
7261 op1 = (insn >> 21) & 0x3;
7266 addr = tcg_temp_local_new_i32();
7267 load_reg_var(s, addr, rn);
7268 if (insn & (1 << 20)) {
7271 gen_load_exclusive(s, rd, 15, addr, 2);
7273 case 1: /* ldrexd */
7274 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7276 case 2: /* ldrexb */
7277 gen_load_exclusive(s, rd, 15, addr, 0);
7279 case 3: /* ldrexh */
7280 gen_load_exclusive(s, rd, 15, addr, 1);
7289 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7291 case 1: /* strexd */
7292 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7294 case 2: /* strexb */
7295 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7297 case 3: /* strexh */
7298 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7304 tcg_temp_free(addr);
7306 /* SWP instruction */
7309 /* ??? This is not really atomic. However we know
7310 we never have multiple CPUs running in parallel,
7311 so it is good enough. */
7312 addr = load_reg(s, rn);
7313 tmp = load_reg(s, rm);
7314 if (insn & (1 << 22)) {
7315 tmp2 = gen_ld8u(addr, IS_USER(s));
7316 gen_st8(tmp, addr, IS_USER(s));
7318 tmp2 = gen_ld32(addr, IS_USER(s));
7319 gen_st32(tmp, addr, IS_USER(s));
7321 tcg_temp_free_i32(addr);
7322 store_reg(s, rd, tmp2);
7328 /* Misc load/store */
7329 rn = (insn >> 16) & 0xf;
7330 rd = (insn >> 12) & 0xf;
7331 addr = load_reg(s, rn);
7332 if (insn & (1 << 24))
7333 gen_add_datah_offset(s, insn, 0, addr);
7335 if (insn & (1 << 20)) {
7339 tmp = gen_ld16u(addr, IS_USER(s));
7342 tmp = gen_ld8s(addr, IS_USER(s));
7346 tmp = gen_ld16s(addr, IS_USER(s));
7350 } else if (sh & 2) {
7355 tmp = load_reg(s, rd);
7356 gen_st32(tmp, addr, IS_USER(s));
7357 tcg_gen_addi_i32(addr, addr, 4);
7358 tmp = load_reg(s, rd + 1);
7359 gen_st32(tmp, addr, IS_USER(s));
7363 tmp = gen_ld32(addr, IS_USER(s));
7364 store_reg(s, rd, tmp);
7365 tcg_gen_addi_i32(addr, addr, 4);
7366 tmp = gen_ld32(addr, IS_USER(s));
7370 address_offset = -4;
7373 tmp = load_reg(s, rd);
7374 gen_st16(tmp, addr, IS_USER(s));
7377 /* Perform base writeback before storing the loaded value to
7378 ensure correct behavior with overlapping index registers.
7379 ldrd with base writeback is undefined if the
7380 destination and index registers overlap. */
7381 if (!(insn & (1 << 24))) {
7382 gen_add_datah_offset(s, insn, address_offset, addr);
7383 store_reg(s, rn, addr);
7384 } else if (insn & (1 << 21)) {
7386 tcg_gen_addi_i32(addr, addr, address_offset);
7387 store_reg(s, rn, addr);
7389 tcg_temp_free_i32(addr);
7392 /* Complete the load. */
7393 store_reg(s, rd, tmp);
7402 if (insn & (1 << 4)) {
7404 /* Armv6 Media instructions. */
7406 rn = (insn >> 16) & 0xf;
7407 rd = (insn >> 12) & 0xf;
7408 rs = (insn >> 8) & 0xf;
7409 switch ((insn >> 23) & 3) {
7410 case 0: /* Parallel add/subtract. */
7411 op1 = (insn >> 20) & 7;
7412 tmp = load_reg(s, rn);
7413 tmp2 = load_reg(s, rm);
7414 sh = (insn >> 5) & 7;
7415 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7417 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7418 tcg_temp_free_i32(tmp2);
7419 store_reg(s, rd, tmp);
7422 if ((insn & 0x00700020) == 0) {
7423 /* Halfword pack. */
7424 tmp = load_reg(s, rn);
7425 tmp2 = load_reg(s, rm);
7426 shift = (insn >> 7) & 0x1f;
7427 if (insn & (1 << 6)) {
7431 tcg_gen_sari_i32(tmp2, tmp2, shift);
7432 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7433 tcg_gen_ext16u_i32(tmp2, tmp2);
7437 tcg_gen_shli_i32(tmp2, tmp2, shift);
7438 tcg_gen_ext16u_i32(tmp, tmp);
7439 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7441 tcg_gen_or_i32(tmp, tmp, tmp2);
7442 tcg_temp_free_i32(tmp2);
7443 store_reg(s, rd, tmp);
7444 } else if ((insn & 0x00200020) == 0x00200000) {
7446 tmp = load_reg(s, rm);
7447 shift = (insn >> 7) & 0x1f;
7448 if (insn & (1 << 6)) {
7451 tcg_gen_sari_i32(tmp, tmp, shift);
7453 tcg_gen_shli_i32(tmp, tmp, shift);
7455 sh = (insn >> 16) & 0x1f;
7456 tmp2 = tcg_const_i32(sh);
7457 if (insn & (1 << 22))
7458 gen_helper_usat(tmp, tmp, tmp2);
7460 gen_helper_ssat(tmp, tmp, tmp2);
7461 tcg_temp_free_i32(tmp2);
7462 store_reg(s, rd, tmp);
7463 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7465 tmp = load_reg(s, rm);
7466 sh = (insn >> 16) & 0x1f;
7467 tmp2 = tcg_const_i32(sh);
7468 if (insn & (1 << 22))
7469 gen_helper_usat16(tmp, tmp, tmp2);
7471 gen_helper_ssat16(tmp, tmp, tmp2);
7472 tcg_temp_free_i32(tmp2);
7473 store_reg(s, rd, tmp);
7474 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7476 tmp = load_reg(s, rn);
7477 tmp2 = load_reg(s, rm);
7478 tmp3 = tcg_temp_new_i32();
7479 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
7480 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7481 tcg_temp_free_i32(tmp3);
7482 tcg_temp_free_i32(tmp2);
7483 store_reg(s, rd, tmp);
7484 } else if ((insn & 0x000003e0) == 0x00000060) {
7485 tmp = load_reg(s, rm);
7486 shift = (insn >> 10) & 3;
7487 /* ??? In many cases it's not necessary to do a
7488 rotate; a shift is sufficient. */
7490 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7491 op1 = (insn >> 20) & 7;
7493 case 0: gen_sxtb16(tmp); break;
7494 case 2: gen_sxtb(tmp); break;
7495 case 3: gen_sxth(tmp); break;
7496 case 4: gen_uxtb16(tmp); break;
7497 case 6: gen_uxtb(tmp); break;
7498 case 7: gen_uxth(tmp); break;
7499 default: goto illegal_op;
7502 tmp2 = load_reg(s, rn);
7503 if ((op1 & 3) == 0) {
7504 gen_add16(tmp, tmp2);
7506 tcg_gen_add_i32(tmp, tmp, tmp2);
7507 tcg_temp_free_i32(tmp2);
7510 store_reg(s, rd, tmp);
7511 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7513 tmp = load_reg(s, rm);
7514 if (insn & (1 << 22)) {
7515 if (insn & (1 << 7)) {
7519 gen_helper_rbit(tmp, tmp);
7522 if (insn & (1 << 7))
7525 tcg_gen_bswap32_i32(tmp, tmp);
7527 store_reg(s, rd, tmp);
7532 case 2: /* Multiplies (Type 3). */
7533 switch ((insn >> 20) & 0x7) {
7535 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7536 /* op2 not 00x or 11x : UNDEF */
7539 /* Signed multiply most significant [accumulate].
7540 (SMMUL, SMMLA, SMMLS) */
7541 tmp = load_reg(s, rm);
7542 tmp2 = load_reg(s, rs);
7543 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7546 tmp = load_reg(s, rd);
7547 if (insn & (1 << 6)) {
7548 tmp64 = gen_subq_msw(tmp64, tmp);
7550 tmp64 = gen_addq_msw(tmp64, tmp);
7553 if (insn & (1 << 5)) {
7554 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7556 tcg_gen_shri_i64(tmp64, tmp64, 32);
7557 tmp = tcg_temp_new_i32();
7558 tcg_gen_trunc_i64_i32(tmp, tmp64);
7559 tcg_temp_free_i64(tmp64);
7560 store_reg(s, rn, tmp);
7564 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7565 if (insn & (1 << 7)) {
7568 tmp = load_reg(s, rm);
7569 tmp2 = load_reg(s, rs);
7570 if (insn & (1 << 5))
7571 gen_swap_half(tmp2);
7572 gen_smul_dual(tmp, tmp2);
7573 if (insn & (1 << 6)) {
7574 /* This subtraction cannot overflow. */
7575 tcg_gen_sub_i32(tmp, tmp, tmp2);
7577 /* This addition cannot overflow 32 bits;
7578 * however it may overflow considered as a signed
7579 * operation, in which case we must set the Q flag.
7581 gen_helper_add_setq(tmp, tmp, tmp2);
7583 tcg_temp_free_i32(tmp2);
7584 if (insn & (1 << 22)) {
7585 /* smlald, smlsld */
7586 tmp64 = tcg_temp_new_i64();
7587 tcg_gen_ext_i32_i64(tmp64, tmp);
7588 tcg_temp_free_i32(tmp);
7589 gen_addq(s, tmp64, rd, rn);
7590 gen_storeq_reg(s, rd, rn, tmp64);
7591 tcg_temp_free_i64(tmp64);
7593 /* smuad, smusd, smlad, smlsd */
7596 tmp2 = load_reg(s, rd);
7597 gen_helper_add_setq(tmp, tmp, tmp2);
7598 tcg_temp_free_i32(tmp2);
7600 store_reg(s, rn, tmp);
7606 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7609 if (((insn >> 5) & 7) || (rd != 15)) {
7612 tmp = load_reg(s, rm);
7613 tmp2 = load_reg(s, rs);
7614 if (insn & (1 << 21)) {
7615 gen_helper_udiv(tmp, tmp, tmp2);
7617 gen_helper_sdiv(tmp, tmp, tmp2);
7619 tcg_temp_free_i32(tmp2);
7620 store_reg(s, rn, tmp);
7627 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7629 case 0: /* Unsigned sum of absolute differences. */
7631 tmp = load_reg(s, rm);
7632 tmp2 = load_reg(s, rs);
7633 gen_helper_usad8(tmp, tmp, tmp2);
7634 tcg_temp_free_i32(tmp2);
7636 tmp2 = load_reg(s, rd);
7637 tcg_gen_add_i32(tmp, tmp, tmp2);
7638 tcg_temp_free_i32(tmp2);
7640 store_reg(s, rn, tmp);
7642 case 0x20: case 0x24: case 0x28: case 0x2c:
7643 /* Bitfield insert/clear. */
7645 shift = (insn >> 7) & 0x1f;
7646 i = (insn >> 16) & 0x1f;
7649 tmp = tcg_temp_new_i32();
7650 tcg_gen_movi_i32(tmp, 0);
7652 tmp = load_reg(s, rm);
7655 tmp2 = load_reg(s, rd);
7656 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7657 tcg_temp_free_i32(tmp2);
7659 store_reg(s, rd, tmp);
7661 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7662 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7664 tmp = load_reg(s, rm);
7665 shift = (insn >> 7) & 0x1f;
7666 i = ((insn >> 16) & 0x1f) + 1;
7671 gen_ubfx(tmp, shift, (1u << i) - 1);
7673 gen_sbfx(tmp, shift, i);
7676 store_reg(s, rd, tmp);
7686 /* Check for undefined extension instructions
7687 * per the ARM Bible, i.e.:
7688 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7690 sh = (0xf << 20) | (0xf << 4);
7691 if (op1 == 0x7 && ((insn & sh) == sh))
7695 /* load/store byte/word */
7696 rn = (insn >> 16) & 0xf;
7697 rd = (insn >> 12) & 0xf;
7698 tmp2 = load_reg(s, rn);
7699 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
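/* 'i' selects a user-permission access: either we are already in
 * user mode, or this is the post-indexed writeback form (P=0, W=1),
 * i.e. the unprivileged LDRT/STRT/LDRBT/STRBT variants. */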
7700 if (insn & (1 << 24))
7701 gen_add_data_offset(s, insn, tmp2);
7702 if (insn & (1 << 20)) {
7704 if (insn & (1 << 22)) {
7705 tmp = gen_ld8u(tmp2, i);
7707 tmp = gen_ld32(tmp2, i);
7711 tmp = load_reg(s, rd);
7712 if (insn & (1 << 22))
7713 gen_st8(tmp, tmp2, i);
7715 gen_st32(tmp, tmp2, i);
7717 if (!(insn & (1 << 24))) {
7718 gen_add_data_offset(s, insn, tmp2);
7719 store_reg(s, rn, tmp2);
7720 } else if (insn & (1 << 21)) {
7721 store_reg(s, rn, tmp2);
7723 tcg_temp_free_i32(tmp2);
7725 if (insn & (1 << 20)) {
7726 /* Complete the load. */
7727 store_reg_from_load(env, s, rd, tmp);
7733 int j, n, user, loaded_base;
7735 /* load/store multiple words */
7736 /* XXX: store correct base if write back */
7738 if (insn & (1 << 22)) {
7740 goto illegal_op; /* only usable in supervisor mode */
7742 if ((insn & (1 << 15)) == 0)
7745 rn = (insn >> 16) & 0xf;
7746 addr = load_reg(s, rn);
7748 /* compute total size */
7750 TCGV_UNUSED(loaded_var);
7753 if (insn & (1 << i))
7756 /* XXX: test invalid n == 0 case ? */
7757 if (insn & (1 << 23)) {
7758 if (insn & (1 << 24)) {
7760 tcg_gen_addi_i32(addr, addr, 4);
7762 /* post increment */
7765 if (insn & (1 << 24)) {
7767 tcg_gen_addi_i32(addr, addr, -(n * 4));
7769 /* post decrement */
7771 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7776 if (insn & (1 << i)) {
7777 if (insn & (1 << 20)) {
7779 tmp = gen_ld32(addr, IS_USER(s));
7781 tmp2 = tcg_const_i32(i);
7782 gen_helper_set_user_reg(tmp2, tmp);
7783 tcg_temp_free_i32(tmp2);
7784 tcg_temp_free_i32(tmp);
7785 } else if (i == rn) {
7789 store_reg_from_load(env, s, i, tmp);
7794 /* special case: r15 = PC + 8 */
7795 val = (long)s->pc + 4;
7796 tmp = tcg_temp_new_i32();
7797 tcg_gen_movi_i32(tmp, val);
7799 tmp = tcg_temp_new_i32();
7800 tmp2 = tcg_const_i32(i);
7801 gen_helper_get_user_reg(tmp, tmp2);
7802 tcg_temp_free_i32(tmp2);
7804 tmp = load_reg(s, i);
7806 gen_st32(tmp, addr, IS_USER(s));
7809 /* no need to add after the last transfer */
7811 tcg_gen_addi_i32(addr, addr, 4);
7814 if (insn & (1 << 21)) {
7816 if (insn & (1 << 23)) {
7817 if (insn & (1 << 24)) {
7820 /* post increment */
7821 tcg_gen_addi_i32(addr, addr, 4);
7824 if (insn & (1 << 24)) {
7827 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7829 /* post decrement */
7830 tcg_gen_addi_i32(addr, addr, -(n * 4));
7833 store_reg(s, rn, addr);
7835 tcg_temp_free_i32(addr);
7838 store_reg(s, rn, loaded_var);
7840 if ((insn & (1 << 22)) && !user) {
7841 /* Restore CPSR from SPSR. */
7842 tmp = load_cpu_field(spsr);
7843 gen_set_cpsr(tmp, 0xffffffff);
7844 tcg_temp_free_i32(tmp);
7845 s->is_jmp = DISAS_UPDATE;
7854 /* branch (and link) */
7855 val = (int32_t)s->pc;
7856 if (insn & (1 << 24)) {
7857 tmp = tcg_temp_new_i32();
7858 tcg_gen_movi_i32(tmp, val);
7859 store_reg(s, 14, tmp);
7861 offset = (((int32_t)insn << 8) >> 8);
7862 val += (offset << 2) + 4;
7870 if (disas_coproc_insn(env, s, insn))
7875 gen_set_pc_im(s->pc);
7876 s->is_jmp = DISAS_SWI;
7880 gen_exception_insn(s, 4, EXCP_UDEF);
7886 /* Return true if this is a Thumb-2 logical op. */
7888 thumb2_logic_op(int op)
7893 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7894 then set condition code flags based on the result of the operation.
7895 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7896 to the high bit of T1.
7897 Returns zero if the opcode is valid. */
7900 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7907 tcg_gen_and_i32(t0, t0, t1);
7911 tcg_gen_andc_i32(t0, t0, t1);
7915 tcg_gen_or_i32(t0, t0, t1);
7919 tcg_gen_orc_i32(t0, t0, t1);
7923 tcg_gen_xor_i32(t0, t0, t1);
7928 gen_helper_add_cc(t0, t0, t1);
7930 tcg_gen_add_i32(t0, t0, t1);
7934 gen_helper_adc_cc(t0, t0, t1);
7940 gen_helper_sbc_cc(t0, t0, t1);
7942 gen_sub_carry(t0, t0, t1);
7946 gen_helper_sub_cc(t0, t0, t1);
7948 tcg_gen_sub_i32(t0, t0, t1);
7952 gen_helper_sub_cc(t0, t1, t0);
7954 tcg_gen_sub_i32(t0, t1, t0);
7956 default: /* 5, 6, 7, 9, 12, 15. */
7962 gen_set_CF_bit31(t1);
7967 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7969 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
7971 uint32_t insn, imm, shift, offset;
7972 uint32_t rd, rn, rm, rs;
7983 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7984 || arm_feature (env, ARM_FEATURE_M))) {
7985 /* Thumb-1 cores may need to treat bl and blx as a pair of
7986 16-bit instructions to get correct prefetch abort behavior. */
7988 if ((insn & (1 << 12)) == 0) {
7990 /* Second half of blx. */
7991 offset = ((insn & 0x7ff) << 1);
7992 tmp = load_reg(s, 14);
7993 tcg_gen_addi_i32(tmp, tmp, offset);
7994 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7996 tmp2 = tcg_temp_new_i32();
7997 tcg_gen_movi_i32(tmp2, s->pc | 1);
7998 store_reg(s, 14, tmp2);
8002 if (insn & (1 << 11)) {
8003 /* Second half of bl. */
8004 offset = ((insn & 0x7ff) << 1) | 1;
8005 tmp = load_reg(s, 14);
8006 tcg_gen_addi_i32(tmp, tmp, offset);
8008 tmp2 = tcg_temp_new_i32();
8009 tcg_gen_movi_i32(tmp2, s->pc | 1);
8010 store_reg(s, 14, tmp2);
8014 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8015 /* Instruction spans a page boundary. Implement it as two
8016 16-bit instructions in case the second half causes an
8018 offset = ((int32_t)insn << 21) >> 9;
8019 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
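/* First half of a Thumb-1 style BL/BLX: LR accumulates the
 * sign-extended upper offset; the second 16-bit half, decoded on its
 * own, adds the low bits and performs the actual call. */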
8022 /* Fall through to 32-bit decode. */
8025 insn = arm_lduw_code(s->pc, s->bswap_code);
8027 insn |= (uint32_t)insn_hw1 << 16;
8029 if ((insn & 0xf800e800) != 0xf000e800) {
8033 rn = (insn >> 16) & 0xf;
8034 rs = (insn >> 12) & 0xf;
8035 rd = (insn >> 8) & 0xf;
8037 switch ((insn >> 25) & 0xf) {
8038 case 0: case 1: case 2: case 3:
8039 /* 16-bit instructions. Should never happen. */
8042 if (insn & (1 << 22)) {
8043 /* Other load/store, table branch. */
8044 if (insn & 0x01200000) {
8045 /* Load/store doubleword. */
8047 addr = tcg_temp_new_i32();
8048 tcg_gen_movi_i32(addr, s->pc & ~3);
8050 addr = load_reg(s, rn);
8052 offset = (insn & 0xff) * 4;
8053 if ((insn & (1 << 23)) == 0)
8055 if (insn & (1 << 24)) {
8056 tcg_gen_addi_i32(addr, addr, offset);
8059 if (insn & (1 << 20)) {
8061 tmp = gen_ld32(addr, IS_USER(s));
8062 store_reg(s, rs, tmp);
8063 tcg_gen_addi_i32(addr, addr, 4);
8064 tmp = gen_ld32(addr, IS_USER(s));
8065 store_reg(s, rd, tmp);
8068 tmp = load_reg(s, rs);
8069 gen_st32(tmp, addr, IS_USER(s));
8070 tcg_gen_addi_i32(addr, addr, 4);
8071 tmp = load_reg(s, rd);
8072 gen_st32(tmp, addr, IS_USER(s));
8074 if (insn & (1 << 21)) {
8075 /* Base writeback. */
8078 tcg_gen_addi_i32(addr, addr, offset - 4);
8079 store_reg(s, rn, addr);
8081 tcg_temp_free_i32(addr);
8083 } else if ((insn & (1 << 23)) == 0) {
8084 /* Load/store exclusive word. */
8085 addr = tcg_temp_local_new();
8086 load_reg_var(s, addr, rn);
8087 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8088 if (insn & (1 << 20)) {
8089 gen_load_exclusive(s, rs, 15, addr, 2);
8091 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8093 tcg_temp_free(addr);
8094 } else if ((insn & (1 << 6)) == 0) {
8097 addr = tcg_temp_new_i32();
8098 tcg_gen_movi_i32(addr, s->pc);
8100 addr = load_reg(s, rn);
8102 tmp = load_reg(s, rm);
8103 tcg_gen_add_i32(addr, addr, tmp);
8104 if (insn & (1 << 4)) {
8106 tcg_gen_add_i32(addr, addr, tmp);
8107 tcg_temp_free_i32(tmp);
8108 tmp = gen_ld16u(addr, IS_USER(s));
8110 tcg_temp_free_i32(tmp);
8111 tmp = gen_ld8u(addr, IS_USER(s));
8113 tcg_temp_free_i32(addr);
8114 tcg_gen_shli_i32(tmp, tmp, 1);
8115 tcg_gen_addi_i32(tmp, tmp, s->pc);
8116 store_reg(s, 15, tmp);
8118 /* Load/store exclusive byte/halfword/doubleword. */
8120 op = (insn >> 4) & 0x3;
8124 addr = tcg_temp_local_new();
8125 load_reg_var(s, addr, rn);
8126 if (insn & (1 << 20)) {
8127 gen_load_exclusive(s, rs, rd, addr, op);
8129 gen_store_exclusive(s, rm, rs, rd, addr, op);
8131 tcg_temp_free(addr);
8134 /* Load/store multiple, RFE, SRS. */
8135 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8136 /* Not available in user mode. */
8139 if (insn & (1 << 20)) {
8141 addr = load_reg(s, rn);
8142 if ((insn & (1 << 24)) == 0)
8143 tcg_gen_addi_i32(addr, addr, -8);
8144 /* Load PC into tmp and CPSR into tmp2. */
8145 tmp = gen_ld32(addr, 0);
8146 tcg_gen_addi_i32(addr, addr, 4);
8147 tmp2 = gen_ld32(addr, 0);
8148 if (insn & (1 << 21)) {
8149 /* Base writeback. */
8150 if (insn & (1 << 24)) {
8151 tcg_gen_addi_i32(addr, addr, 4);
8153 tcg_gen_addi_i32(addr, addr, -4);
8155 store_reg(s, rn, addr);
8157 tcg_temp_free_i32(addr);
8159 gen_rfe(s, tmp, tmp2);
8163 addr = tcg_temp_new_i32();
8164 tmp = tcg_const_i32(op);
8165 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8166 tcg_temp_free_i32(tmp);
8167 if ((insn & (1 << 24)) == 0) {
8168 tcg_gen_addi_i32(addr, addr, -8);
8170 tmp = load_reg(s, 14);
8171 gen_st32(tmp, addr, 0);
8172 tcg_gen_addi_i32(addr, addr, 4);
8173 tmp = tcg_temp_new_i32();
8174 gen_helper_cpsr_read(tmp);
8175 gen_st32(tmp, addr, 0);
8176 if (insn & (1 << 21)) {
8177 if ((insn & (1 << 24)) == 0) {
8178 tcg_gen_addi_i32(addr, addr, -4);
8180 tcg_gen_addi_i32(addr, addr, 4);
8182 tmp = tcg_const_i32(op);
8183 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8184 tcg_temp_free_i32(tmp);
8186 tcg_temp_free_i32(addr);
8190 int i, loaded_base = 0;
8192 /* Load/store multiple. */
8193 addr = load_reg(s, rn);
8195 for (i = 0; i < 16; i++) {
8196 if (insn & (1 << i))
8199 if (insn & (1 << 24)) {
8200 tcg_gen_addi_i32(addr, addr, -offset);
8203 TCGV_UNUSED(loaded_var);
8204 for (i = 0; i < 16; i++) {
8205 if ((insn & (1 << i)) == 0)
8207 if (insn & (1 << 20)) {
8209 tmp = gen_ld32(addr, IS_USER(s));
8212 } else if (i == rn) {
8216 store_reg(s, i, tmp);
8220 tmp = load_reg(s, i);
8221 gen_st32(tmp, addr, IS_USER(s));
8223 tcg_gen_addi_i32(addr, addr, 4);
8226 store_reg(s, rn, loaded_var);
8228 if (insn & (1 << 21)) {
8229 /* Base register writeback. */
8230 if (insn & (1 << 24)) {
8231 tcg_gen_addi_i32(addr, addr, -offset);
8233 /* Fault if writeback register is in register list. */
8234 if (insn & (1 << rn))
8236 store_reg(s, rn, addr);
8238 tcg_temp_free_i32(addr);
8245 op = (insn >> 21) & 0xf;
8247 /* Halfword pack. */
8248 tmp = load_reg(s, rn);
8249 tmp2 = load_reg(s, rm);
8250 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
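/* Thumb-2 constant shift amount: imm3 (bits 14:12) and imm2
 * (bits 7:6) concatenated into a 5-bit count. */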
8251 if (insn & (1 << 5)) {
8255 tcg_gen_sari_i32(tmp2, tmp2, shift);
8256 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8257 tcg_gen_ext16u_i32(tmp2, tmp2);
8261 tcg_gen_shli_i32(tmp2, tmp2, shift);
8262 tcg_gen_ext16u_i32(tmp, tmp);
8263 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8265 tcg_gen_or_i32(tmp, tmp, tmp2);
8266 tcg_temp_free_i32(tmp2);
8267 store_reg(s, rd, tmp);
8269 /* Data processing register constant shift. */
8271 tmp = tcg_temp_new_i32();
8272 tcg_gen_movi_i32(tmp, 0);
8274 tmp = load_reg(s, rn);
8276 tmp2 = load_reg(s, rm);
8278 shiftop = (insn >> 4) & 3;
8279 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8280 conds = (insn & (1 << 20)) != 0;
8281 logic_cc = (conds && thumb2_logic_op(op));
8282 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8283 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8285 tcg_temp_free_i32(tmp2);
8287 store_reg(s, rd, tmp);
8289 tcg_temp_free_i32(tmp);
8293 case 13: /* Misc data processing. */
8294 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8295 if (op < 4 && (insn & 0xf000) != 0xf000)
8298 case 0: /* Register controlled shift. */
8299 tmp = load_reg(s, rn);
8300 tmp2 = load_reg(s, rm);
8301 if ((insn & 0x70) != 0)
8303 op = (insn >> 21) & 3;
8304 logic_cc = (insn & (1 << 20)) != 0;
8305 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8308 store_reg_bx(env, s, rd, tmp);
8310 case 1: /* Sign/zero extend. */
8311 tmp = load_reg(s, rm);
8312 shift = (insn >> 4) & 3;
8313 /* ??? In many cases it's not necessary to do a
8314 rotate, a shift is sufficient. */
8316 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8317 op = (insn >> 20) & 7;
8319 case 0: gen_sxth(tmp); break;
8320 case 1: gen_uxth(tmp); break;
8321 case 2: gen_sxtb16(tmp); break;
8322 case 3: gen_uxtb16(tmp); break;
8323 case 4: gen_sxtb(tmp); break;
8324 case 5: gen_uxtb(tmp); break;
8325 default: goto illegal_op;
8328 tmp2 = load_reg(s, rn);
8329 if ((op >> 1) == 1) {
8330 gen_add16(tmp, tmp2);
8332 tcg_gen_add_i32(tmp, tmp, tmp2);
8333 tcg_temp_free_i32(tmp2);
8336 store_reg(s, rd, tmp);
8338 case 2: /* SIMD add/subtract. */
8339 op = (insn >> 20) & 7;
8340 shift = (insn >> 4) & 7;
8341 if ((op & 3) == 3 || (shift & 3) == 3)
8343 tmp = load_reg(s, rn);
8344 tmp2 = load_reg(s, rm);
8345 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8346 tcg_temp_free_i32(tmp2);
8347 store_reg(s, rd, tmp);
8349 case 3: /* Other data processing. */
8350 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8352 /* Saturating add/subtract. */
8353 tmp = load_reg(s, rn);
8354 tmp2 = load_reg(s, rm);
8356 gen_helper_double_saturate(tmp, tmp);
8358 gen_helper_sub_saturate(tmp, tmp2, tmp);
8360 gen_helper_add_saturate(tmp, tmp, tmp2);
8361 tcg_temp_free_i32(tmp2);
8363 tmp = load_reg(s, rn);
8365 case 0x0a: /* rbit */
8366 gen_helper_rbit(tmp, tmp);
8368 case 0x08: /* rev */
8369 tcg_gen_bswap32_i32(tmp, tmp);
8371 case 0x09: /* rev16 */
8374 case 0x0b: /* revsh */
8377 case 0x10: /* sel */
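/* SEL: each result byte is chosen from Rn or Rm according to the GE flags set by an earlier parallel add/subtract. */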
8378 tmp2 = load_reg(s, rm);
8379 tmp3 = tcg_temp_new_i32();
8380 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8381 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8382 tcg_temp_free_i32(tmp3);
8383 tcg_temp_free_i32(tmp2);
8385 case 0x18: /* clz */
8386 gen_helper_clz(tmp, tmp);
8392 store_reg(s, rd, tmp);
8394 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8395 op = (insn >> 4) & 0xf;
8396 tmp = load_reg(s, rn);
8397 tmp2 = load_reg(s, rm);
8398 switch ((insn >> 20) & 7) {
8399 case 0: /* 32 x 32 -> 32 */
8400 tcg_gen_mul_i32(tmp, tmp, tmp2);
8401 tcg_temp_free_i32(tmp2);
8403 tmp2 = load_reg(s, rs);
8405 tcg_gen_sub_i32(tmp, tmp2, tmp);
8407 tcg_gen_add_i32(tmp, tmp, tmp2);
8408 tcg_temp_free_i32(tmp2);
8411 case 1: /* 16 x 16 -> 32 */
8412 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8413 tcg_temp_free_i32(tmp2);
8415 tmp2 = load_reg(s, rs);
8416 gen_helper_add_setq(tmp, tmp, tmp2);
8417 tcg_temp_free_i32(tmp2);
8420 case 2: /* Dual multiply add. */
8421 case 4: /* Dual multiply subtract. */
8423 gen_swap_half(tmp2);
8424 gen_smul_dual(tmp, tmp2);
8425 if (insn & (1 << 22)) {
8426 /* This subtraction cannot overflow. */
8427 tcg_gen_sub_i32(tmp, tmp, tmp2);
8429 /* This addition cannot overflow 32 bits;
8430 * however it may overflow considered as a signed
8431 * operation, in which case we must set the Q flag. */
8433 gen_helper_add_setq(tmp, tmp, tmp2);
8435 tcg_temp_free_i32(tmp2);
8438 tmp2 = load_reg(s, rs);
8439 gen_helper_add_setq(tmp, tmp, tmp2);
8440 tcg_temp_free_i32(tmp2);
8443 case 3: /* 32 * 16 -> 32msb */
8445 tcg_gen_sari_i32(tmp2, tmp2, 16);
8448 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8449 tcg_gen_shri_i64(tmp64, tmp64, 16);
8450 tmp = tcg_temp_new_i32();
8451 tcg_gen_trunc_i64_i32(tmp, tmp64);
8452 tcg_temp_free_i64(tmp64);
8455 tmp2 = load_reg(s, rs);
8456 gen_helper_add_setq(tmp, tmp, tmp2);
8457 tcg_temp_free_i32(tmp2);
8460 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8461 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8463 tmp = load_reg(s, rs);
8464 if (insn & (1 << 20)) {
8465 tmp64 = gen_addq_msw(tmp64, tmp);
8467 tmp64 = gen_subq_msw(tmp64, tmp);
8470 if (insn & (1 << 4)) {
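/* The 'R' bit selects the rounding variants (SMMULR/SMMLAR/SMMLSR): bias the 64-bit product by 0x80000000 before taking the high word. */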
8471 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8473 tcg_gen_shri_i64(tmp64, tmp64, 32);
8474 tmp = tcg_temp_new_i32();
8475 tcg_gen_trunc_i64_i32(tmp, tmp64);
8476 tcg_temp_free_i64(tmp64);
8478 case 7: /* Unsigned sum of absolute differences. */
8479 gen_helper_usad8(tmp, tmp, tmp2);
8480 tcg_temp_free_i32(tmp2);
8482 tmp2 = load_reg(s, rs);
8483 tcg_gen_add_i32(tmp, tmp, tmp2);
8484 tcg_temp_free_i32(tmp2);
8488 store_reg(s, rd, tmp);
8490 case 6: case 7: /* 64-bit multiply, Divide. */
8491 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8492 tmp = load_reg(s, rn);
8493 tmp2 = load_reg(s, rm);
8494 if ((op & 0x50) == 0x10) {
8496 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8500 gen_helper_udiv(tmp, tmp, tmp2);
8502 gen_helper_sdiv(tmp, tmp, tmp2);
8503 tcg_temp_free_i32(tmp2);
8504 store_reg(s, rd, tmp);
8505 } else if ((op & 0xe) == 0xc) {
8506 /* Dual multiply accumulate long. */
8508 gen_swap_half(tmp2);
8509 gen_smul_dual(tmp, tmp2);
8511 tcg_gen_sub_i32(tmp, tmp, tmp2);
8513 tcg_gen_add_i32(tmp, tmp, tmp2);
8515 tcg_temp_free_i32(tmp2);
8517 tmp64 = tcg_temp_new_i64();
8518 tcg_gen_ext_i32_i64(tmp64, tmp);
8519 tcg_temp_free_i32(tmp);
8520 gen_addq(s, tmp64, rs, rd);
8521 gen_storeq_reg(s, rs, rd, tmp64);
8522 tcg_temp_free_i64(tmp64);
8525 /* Unsigned 64-bit multiply */
8526 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8530 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8531 tcg_temp_free_i32(tmp2);
8532 tmp64 = tcg_temp_new_i64();
8533 tcg_gen_ext_i32_i64(tmp64, tmp);
8534 tcg_temp_free_i32(tmp);
8536 /* Signed 64-bit multiply */
8537 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8542 gen_addq_lo(s, tmp64, rs);
8543 gen_addq_lo(s, tmp64, rd);
8544 } else if (op & 0x40) {
8545 /* 64-bit accumulate. */
8546 gen_addq(s, tmp64, rs, rd);
8548 gen_storeq_reg(s, rs, rd, tmp64);
8549 tcg_temp_free_i64(tmp64);
8554 case 6: case 7: case 14: case 15:
8556 if (((insn >> 24) & 3) == 3) {
8557 /* Translate into the equivalent ARM encoding. */
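/* Bits 27:26 and 24 are cleared, the Thumb 'U' bit (bit 28) moves down to ARM bit 24, and the ARM unconditional (cond == 0xF) Neon prefix is forced. */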
8558 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8559 if (disas_neon_data_insn(env, s, insn))
8562 if (insn & (1 << 28))
8564 if (disas_coproc_insn (env, s, insn))
8568 case 8: case 9: case 10: case 11:
8569 if (insn & (1 << 15)) {
8570 /* Branches, misc control. */
8571 if (insn & 0x5000) {
8572 /* Unconditional branch. */
8573 /* signextend(hw1[10:0]) -> offset[:12]. */
8574 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8575 /* hw1[10:0] -> offset[11:1]. */
8576 offset |= (insn & 0x7ff) << 1;
8577 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8578 offset[24:22] already have the same value because of the
8579 sign extension above. */
8580 offset ^= ((~insn) & (1 << 13)) << 10;
8581 offset ^= ((~insn) & (1 << 11)) << 11;
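/* offset now holds the sign-extended S:I1:I2:imm10:imm11:'0' displacement, with I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S). */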
8583 if (insn & (1 << 14)) {
8584 /* Branch and link. */
8585 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8589 if (insn & (1 << 12)) {
8594 offset &= ~(uint32_t)2;
8595 /* thumb2 bx, no need to check */
8596 gen_bx_im(s, offset);
8598 } else if (((insn >> 23) & 7) == 7) {
8600 if (insn & (1 << 13))
8603 if (insn & (1 << 26)) {
8604 /* Secure monitor call (v6Z) */
8605 goto illegal_op; /* not implemented. */
8607 op = (insn >> 20) & 7;
8609 case 0: /* msr cpsr. */
8611 tmp = load_reg(s, rn);
8612 addr = tcg_const_i32(insn & 0xff);
8613 gen_helper_v7m_msr(cpu_env, addr, tmp);
8614 tcg_temp_free_i32(addr);
8615 tcg_temp_free_i32(tmp);
8620 case 1: /* msr spsr. */
8623 tmp = load_reg(s, rn);
8625 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8629 case 2: /* cps, nop-hint. */
8630 if (((insn >> 8) & 7) == 0) {
8631 gen_nop_hint(s, insn & 0xff);
8633 /* Implemented as NOP in user mode. */
8638 if (insn & (1 << 10)) {
8639 if (insn & (1 << 7))
8641 if (insn & (1 << 6))
8643 if (insn & (1 << 5))
8645 if (insn & (1 << 9))
8646 imm = CPSR_A | CPSR_I | CPSR_F;
8648 if (insn & (1 << 8)) {
8650 imm |= (insn & 0x1f);
8653 gen_set_psr_im(s, offset, 0, imm);
8656 case 3: /* Special control operations. */
8658 op = (insn >> 4) & 0xf;
8666 /* These execute as NOPs. */
8673 /* Trivial implementation equivalent to bx. */
8674 tmp = load_reg(s, rn);
8677 case 5: /* Exception return. */
8681 if (rn != 14 || rd != 15) {
8684 tmp = load_reg(s, rn);
8685 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8686 gen_exception_return(s, tmp);
8688 case 6: /* mrs cpsr. */
8689 tmp = tcg_temp_new_i32();
8691 addr = tcg_const_i32(insn & 0xff);
8692 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8693 tcg_temp_free_i32(addr);
8695 gen_helper_cpsr_read(tmp);
8697 store_reg(s, rd, tmp);
8699 case 7: /* mrs spsr. */
8700 /* Not accessible in user mode. */
8701 if (IS_USER(s) || IS_M(env))
8703 tmp = load_cpu_field(spsr);
8704 store_reg(s, rd, tmp);
8709 /* Conditional branch. */
8710 op = (insn >> 22) & 0xf;
8711 /* Generate a conditional jump to next instruction. */
8712 s->condlabel = gen_new_label();
8713 gen_test_cc(op ^ 1, s->condlabel);
8716 /* offset[11:1] = insn[10:0] */
8717 offset = (insn & 0x7ff) << 1;
8718 /* offset[17:12] = insn[21:16]. */
8719 offset |= (insn & 0x003f0000) >> 4;
8720 /* offset[31:20] = insn[26]. */
8721 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8722 /* offset[18] = insn[13]. */
8723 offset |= (insn & (1 << 13)) << 5;
8724 /* offset[19] = insn[11]. */
8725 offset |= (insn & (1 << 11)) << 8;
8727 /* jump to the offset */
8728 gen_jmp(s, s->pc + offset);
8731 /* Data processing immediate. */
8732 if (insn & (1 << 25)) {
8733 if (insn & (1 << 24)) {
8734 if (insn & (1 << 20))
8736 /* Bitfield/Saturate. */
8737 op = (insn >> 21) & 7;
8739 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8741 tmp = tcg_temp_new_i32();
8742 tcg_gen_movi_i32(tmp, 0);
8744 tmp = load_reg(s, rn);
8747 case 2: /* Signed bitfield extract. */
8749 if (shift + imm > 32)
8752 gen_sbfx(tmp, shift, imm);
8754 case 6: /* Unsigned bitfield extract. */
8756 if (shift + imm > 32)
8759 gen_ubfx(tmp, shift, (1u << imm) - 1);
8761 case 3: /* Bitfield insert/clear. */
8764 imm = imm + 1 - shift;
8766 tmp2 = load_reg(s, rd);
8767 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8768 tcg_temp_free_i32(tmp2);
8773 default: /* Saturate. */
8776 tcg_gen_sari_i32(tmp, tmp, shift);
8778 tcg_gen_shli_i32(tmp, tmp, shift);
8780 tmp2 = tcg_const_i32(imm);
8783 if ((op & 1) && shift == 0)
8784 gen_helper_usat16(tmp, tmp, tmp2);
8786 gen_helper_usat(tmp, tmp, tmp2);
8789 if ((op & 1) && shift == 0)
8790 gen_helper_ssat16(tmp, tmp, tmp2);
8792 gen_helper_ssat(tmp, tmp, tmp2);
8794 tcg_temp_free_i32(tmp2);
8797 store_reg(s, rd, tmp);
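/* Plain binary immediate: gather i:imm3:imm8 (insn bits 26, 14:12 and 7:0) into a 12-bit value. */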
8799 imm = ((insn & 0x04000000) >> 15)
8800 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8801 if (insn & (1 << 22)) {
8802 /* 16-bit immediate. */
8803 imm |= (insn >> 4) & 0xf000;
8804 if (insn & (1 << 23)) {
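/* movt: keep the low halfword of Rd and replace the top 16 bits with the immediate. */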
8806 tmp = load_reg(s, rd);
8807 tcg_gen_ext16u_i32(tmp, tmp);
8808 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8811 tmp = tcg_temp_new_i32();
8812 tcg_gen_movi_i32(tmp, imm);
8815 /* Add/sub 12-bit immediate. */
8817 offset = s->pc & ~(uint32_t)3;
8818 if (insn & (1 << 23))
8822 tmp = tcg_temp_new_i32();
8823 tcg_gen_movi_i32(tmp, offset);
8825 tmp = load_reg(s, rn);
8826 if (insn & (1 << 23))
8827 tcg_gen_subi_i32(tmp, tmp, imm);
8829 tcg_gen_addi_i32(tmp, tmp, imm);
8832 store_reg(s, rd, tmp);
8835 int shifter_out = 0;
8836 /* modified 12-bit immediate. */
8837 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8838 imm = (insn & 0xff);
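/* Expand imm8 according to the type selected by 'shift'; as an illustration, imm8 == 0xAB gives 0x000000AB, 0x00AB00AB, 0xAB00AB00 or 0xABABABAB for types 0-3, and a rotated 8-bit constant otherwise. */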
8841 /* Nothing to do. */
8843 case 1: /* 00XY00XY */
8846 case 2: /* XY00XY00 */
8850 case 3: /* XYXYXYXY */
8854 default: /* Rotated constant. */
8855 shift = (shift << 1) | (imm >> 7);
8857 imm = imm << (32 - shift);
8861 tmp2 = tcg_temp_new_i32();
8862 tcg_gen_movi_i32(tmp2, imm);
8863 rn = (insn >> 16) & 0xf;
8865 tmp = tcg_temp_new_i32();
8866 tcg_gen_movi_i32(tmp, 0);
8868 tmp = load_reg(s, rn);
8870 op = (insn >> 21) & 0xf;
8871 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8872 shifter_out, tmp, tmp2))
8874 tcg_temp_free_i32(tmp2);
8875 rd = (insn >> 8) & 0xf;
8877 store_reg(s, rd, tmp);
8879 tcg_temp_free_i32(tmp);
8884 case 12: /* Load/store single data item. */
8889 if ((insn & 0x01100000) == 0x01000000) {
8890 if (disas_neon_ls_insn(env, s, insn))
8894 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8896 if (!(insn & (1 << 20))) {
8900 /* Byte or halfword load space with dest == r15 : memory hints.
8901 * Catch them early so we don't emit pointless addressing code.
8902 * This space is a mix of:
8903 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8904 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP cores)
8906 * unallocated hints, which must be treated as NOPs
8907 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8908 * which is easiest for the decoding logic
8909 * Some space which must UNDEF */
8911 int op1 = (insn >> 23) & 3;
8912 int op2 = (insn >> 6) & 0x3f;
8917 /* UNPREDICTABLE, unallocated hint or
8918 * PLD/PLDW/PLI (literal) */
8923 return 0; /* PLD/PLDW/PLI or unallocated hint */
8925 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8926 return 0; /* PLD/PLDW/PLI or unallocated hint */
8928 /* UNDEF space, or an UNPREDICTABLE */
8934 addr = tcg_temp_new_i32();
8936 /* s->pc has already been incremented by 4. */
8937 imm = s->pc & 0xfffffffc;
8938 if (insn & (1 << 23))
8939 imm += insn & 0xfff;
8941 imm -= insn & 0xfff;
8942 tcg_gen_movi_i32(addr, imm);
8944 addr = load_reg(s, rn);
8945 if (insn & (1 << 23)) {
8946 /* Positive offset. */
8948 tcg_gen_addi_i32(addr, addr, imm);
8951 switch ((insn >> 8) & 0xf) {
8952 case 0x0: /* Shifted Register. */
8953 shift = (insn >> 4) & 0xf;
8955 tcg_temp_free_i32(addr);
8958 tmp = load_reg(s, rm);
8960 tcg_gen_shli_i32(tmp, tmp, shift);
8961 tcg_gen_add_i32(addr, addr, tmp);
8962 tcg_temp_free_i32(tmp);
8964 case 0xc: /* Negative offset. */
8965 tcg_gen_addi_i32(addr, addr, -imm);
8967 case 0xe: /* User privilege. */
8968 tcg_gen_addi_i32(addr, addr, imm);
8971 case 0x9: /* Post-decrement. */
8974 case 0xb: /* Post-increment. */
8978 case 0xd: /* Pre-decrement. */
8981 case 0xf: /* Pre-increment. */
8982 tcg_gen_addi_i32(addr, addr, imm);
8986 tcg_temp_free_i32(addr);
8991 if (insn & (1 << 20)) {
8994 case 0: tmp = gen_ld8u(addr, user); break;
8995 case 4: tmp = gen_ld8s(addr, user); break;
8996 case 1: tmp = gen_ld16u(addr, user); break;
8997 case 5: tmp = gen_ld16s(addr, user); break;
8998 case 2: tmp = gen_ld32(addr, user); break;
9000 tcg_temp_free_i32(addr);
9006 store_reg(s, rs, tmp);
9010 tmp = load_reg(s, rs);
9012 case 0: gen_st8(tmp, addr, user); break;
9013 case 1: gen_st16(tmp, addr, user); break;
9014 case 2: gen_st32(tmp, addr, user); break;
9016 tcg_temp_free_i32(addr);
9021 tcg_gen_addi_i32(addr, addr, imm);
9023 store_reg(s, rn, addr);
9025 tcg_temp_free_i32(addr);
9037 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
9039 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9046 if (s->condexec_mask) {
9047 cond = s->condexec_cond;
9048 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9049 s->condlabel = gen_new_label();
9050 gen_test_cc(cond ^ 1, s->condlabel);
9055 insn = arm_lduw_code(s->pc, s->bswap_code);
9058 switch (insn >> 12) {
9062 op = (insn >> 11) & 3;
9065 rn = (insn >> 3) & 7;
9066 tmp = load_reg(s, rn);
9067 if (insn & (1 << 10)) {
9069 tmp2 = tcg_temp_new_i32();
9070 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9073 rm = (insn >> 6) & 7;
9074 tmp2 = load_reg(s, rm);
9076 if (insn & (1 << 9)) {
9077 if (s->condexec_mask)
9078 tcg_gen_sub_i32(tmp, tmp, tmp2);
9080 gen_helper_sub_cc(tmp, tmp, tmp2);
9082 if (s->condexec_mask)
9083 tcg_gen_add_i32(tmp, tmp, tmp2);
9085 gen_helper_add_cc(tmp, tmp, tmp2);
9087 tcg_temp_free_i32(tmp2);
9088 store_reg(s, rd, tmp);
9090 /* shift immediate */
9091 rm = (insn >> 3) & 7;
9092 shift = (insn >> 6) & 0x1f;
9093 tmp = load_reg(s, rm);
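/* Shifts by immediate only update the condition codes when we are not inside an IT block. */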
9094 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9095 if (!s->condexec_mask)
9097 store_reg(s, rd, tmp);
9101 /* arithmetic large immediate */
9102 op = (insn >> 11) & 3;
9103 rd = (insn >> 8) & 0x7;
9104 if (op == 0) { /* mov */
9105 tmp = tcg_temp_new_i32();
9106 tcg_gen_movi_i32(tmp, insn & 0xff);
9107 if (!s->condexec_mask)
9109 store_reg(s, rd, tmp);
9111 tmp = load_reg(s, rd);
9112 tmp2 = tcg_temp_new_i32();
9113 tcg_gen_movi_i32(tmp2, insn & 0xff);
9116 gen_helper_sub_cc(tmp, tmp, tmp2);
9117 tcg_temp_free_i32(tmp);
9118 tcg_temp_free_i32(tmp2);
9121 if (s->condexec_mask)
9122 tcg_gen_add_i32(tmp, tmp, tmp2);
9124 gen_helper_add_cc(tmp, tmp, tmp2);
9125 tcg_temp_free_i32(tmp2);
9126 store_reg(s, rd, tmp);
9129 if (s->condexec_mask)
9130 tcg_gen_sub_i32(tmp, tmp, tmp2);
9132 gen_helper_sub_cc(tmp, tmp, tmp2);
9133 tcg_temp_free_i32(tmp2);
9134 store_reg(s, rd, tmp);
9140 if (insn & (1 << 11)) {
9141 rd = (insn >> 8) & 7;
9142 /* load pc-relative. Bit 1 of PC is ignored. */
9143 val = s->pc + 2 + ((insn & 0xff) * 4);
9144 val &= ~(uint32_t)2;
9145 addr = tcg_temp_new_i32();
9146 tcg_gen_movi_i32(addr, val);
9147 tmp = gen_ld32(addr, IS_USER(s));
9148 tcg_temp_free_i32(addr);
9149 store_reg(s, rd, tmp);
9152 if (insn & (1 << 10)) {
9153 /* data processing extended or blx */
9154 rd = (insn & 7) | ((insn >> 4) & 8);
9155 rm = (insn >> 3) & 0xf;
9156 op = (insn >> 8) & 3;
9159 tmp = load_reg(s, rd);
9160 tmp2 = load_reg(s, rm);
9161 tcg_gen_add_i32(tmp, tmp, tmp2);
9162 tcg_temp_free_i32(tmp2);
9163 store_reg(s, rd, tmp);
9166 tmp = load_reg(s, rd);
9167 tmp2 = load_reg(s, rm);
9168 gen_helper_sub_cc(tmp, tmp, tmp2);
9169 tcg_temp_free_i32(tmp2);
9170 tcg_temp_free_i32(tmp);
9172 case 2: /* mov/cpy */
9173 tmp = load_reg(s, rm);
9174 store_reg(s, rd, tmp);
9176 case 3:/* branch [and link] exchange thumb register */
9177 tmp = load_reg(s, rm);
9178 if (insn & (1 << 7)) {
9180 val = (uint32_t)s->pc | 1;
9181 tmp2 = tcg_temp_new_i32();
9182 tcg_gen_movi_i32(tmp2, val);
9183 store_reg(s, 14, tmp2);
9185 /* already thumb, no need to check */
9192 /* data processing register */
9194 rm = (insn >> 3) & 7;
9195 op = (insn >> 6) & 0xf;
9196 if (op == 2 || op == 3 || op == 4 || op == 7) {
9197 /* the shift/rotate ops want the operands backwards */
9206 if (op == 9) { /* neg */
9207 tmp = tcg_temp_new_i32();
9208 tcg_gen_movi_i32(tmp, 0);
9209 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9210 tmp = load_reg(s, rd);
9215 tmp2 = load_reg(s, rm);
9218 tcg_gen_and_i32(tmp, tmp, tmp2);
9219 if (!s->condexec_mask)
9223 tcg_gen_xor_i32(tmp, tmp, tmp2);
9224 if (!s->condexec_mask)
9228 if (s->condexec_mask) {
9229 gen_helper_shl(tmp2, tmp2, tmp);
9231 gen_helper_shl_cc(tmp2, tmp2, tmp);
9236 if (s->condexec_mask) {
9237 gen_helper_shr(tmp2, tmp2, tmp);
9239 gen_helper_shr_cc(tmp2, tmp2, tmp);
9244 if (s->condexec_mask) {
9245 gen_helper_sar(tmp2, tmp2, tmp);
9247 gen_helper_sar_cc(tmp2, tmp2, tmp);
9252 if (s->condexec_mask)
9255 gen_helper_adc_cc(tmp, tmp, tmp2);
9258 if (s->condexec_mask)
9259 gen_sub_carry(tmp, tmp, tmp2);
9261 gen_helper_sbc_cc(tmp, tmp, tmp2);
9264 if (s->condexec_mask) {
9265 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9266 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9268 gen_helper_ror_cc(tmp2, tmp2, tmp);
9273 tcg_gen_and_i32(tmp, tmp, tmp2);
9278 if (s->condexec_mask)
9279 tcg_gen_neg_i32(tmp, tmp2);
9281 gen_helper_sub_cc(tmp, tmp, tmp2);
9284 gen_helper_sub_cc(tmp, tmp, tmp2);
9288 gen_helper_add_cc(tmp, tmp, tmp2);
9292 tcg_gen_or_i32(tmp, tmp, tmp2);
9293 if (!s->condexec_mask)
9297 tcg_gen_mul_i32(tmp, tmp, tmp2);
9298 if (!s->condexec_mask)
9302 tcg_gen_andc_i32(tmp, tmp, tmp2);
9303 if (!s->condexec_mask)
9307 tcg_gen_not_i32(tmp2, tmp2);
9308 if (!s->condexec_mask)
9316 store_reg(s, rm, tmp2);
9318 tcg_temp_free_i32(tmp);
9320 store_reg(s, rd, tmp);
9321 tcg_temp_free_i32(tmp2);
9324 tcg_temp_free_i32(tmp);
9325 tcg_temp_free_i32(tmp2);
9330 /* load/store register offset. */
9332 rn = (insn >> 3) & 7;
9333 rm = (insn >> 6) & 7;
9334 op = (insn >> 9) & 7;
9335 addr = load_reg(s, rn);
9336 tmp = load_reg(s, rm);
9337 tcg_gen_add_i32(addr, addr, tmp);
9338 tcg_temp_free_i32(tmp);
9340 if (op < 3) /* store */
9341 tmp = load_reg(s, rd);
9345 gen_st32(tmp, addr, IS_USER(s));
9348 gen_st16(tmp, addr, IS_USER(s));
9351 gen_st8(tmp, addr, IS_USER(s));
9354 tmp = gen_ld8s(addr, IS_USER(s));
9357 tmp = gen_ld32(addr, IS_USER(s));
9360 tmp = gen_ld16u(addr, IS_USER(s));
9363 tmp = gen_ld8u(addr, IS_USER(s));
9366 tmp = gen_ld16s(addr, IS_USER(s));
9369 if (op >= 3) /* load */
9370 store_reg(s, rd, tmp);
9371 tcg_temp_free_i32(addr);
9375 /* load/store word immediate offset */
9377 rn = (insn >> 3) & 7;
9378 addr = load_reg(s, rn);
9379 val = (insn >> 4) & 0x7c;
9380 tcg_gen_addi_i32(addr, addr, val);
9382 if (insn & (1 << 11)) {
9384 tmp = gen_ld32(addr, IS_USER(s));
9385 store_reg(s, rd, tmp);
9388 tmp = load_reg(s, rd);
9389 gen_st32(tmp, addr, IS_USER(s));
9391 tcg_temp_free_i32(addr);
9395 /* load/store byte immediate offset */
9397 rn = (insn >> 3) & 7;
9398 addr = load_reg(s, rn);
9399 val = (insn >> 6) & 0x1f;
9400 tcg_gen_addi_i32(addr, addr, val);
9402 if (insn & (1 << 11)) {
9404 tmp = gen_ld8u(addr, IS_USER(s));
9405 store_reg(s, rd, tmp);
9408 tmp = load_reg(s, rd);
9409 gen_st8(tmp, addr, IS_USER(s));
9411 tcg_temp_free_i32(addr);
9415 /* load/store halfword immediate offset */
9417 rn = (insn >> 3) & 7;
9418 addr = load_reg(s, rn);
9419 val = (insn >> 5) & 0x3e;
9420 tcg_gen_addi_i32(addr, addr, val);
9422 if (insn & (1 << 11)) {
9424 tmp = gen_ld16u(addr, IS_USER(s));
9425 store_reg(s, rd, tmp);
9428 tmp = load_reg(s, rd);
9429 gen_st16(tmp, addr, IS_USER(s));
9431 tcg_temp_free_i32(addr);
9435 /* load/store from stack */
9436 rd = (insn >> 8) & 7;
9437 addr = load_reg(s, 13);
9438 val = (insn & 0xff) * 4;
9439 tcg_gen_addi_i32(addr, addr, val);
9441 if (insn & (1 << 11)) {
9443 tmp = gen_ld32(addr, IS_USER(s));
9444 store_reg(s, rd, tmp);
9447 tmp = load_reg(s, rd);
9448 gen_st32(tmp, addr, IS_USER(s));
9450 tcg_temp_free_i32(addr);
9454 /* add to high reg */
9455 rd = (insn >> 8) & 7;
9456 if (insn & (1 << 11)) {
9458 tmp = load_reg(s, 13);
9460 /* PC. bit 1 is ignored. */
9461 tmp = tcg_temp_new_i32();
9462 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9464 val = (insn & 0xff) * 4;
9465 tcg_gen_addi_i32(tmp, tmp, val);
9466 store_reg(s, rd, tmp);
9471 op = (insn >> 8) & 0xf;
9474 /* adjust stack pointer */
9475 tmp = load_reg(s, 13);
9476 val = (insn & 0x7f) * 4;
9477 if (insn & (1 << 7))
9478 val = -(int32_t)val;
9479 tcg_gen_addi_i32(tmp, tmp, val);
9480 store_reg(s, 13, tmp);
9483 case 2: /* sign/zero extend. */
9486 rm = (insn >> 3) & 7;
9487 tmp = load_reg(s, rm);
9488 switch ((insn >> 6) & 3) {
9489 case 0: gen_sxth(tmp); break;
9490 case 1: gen_sxtb(tmp); break;
9491 case 2: gen_uxth(tmp); break;
9492 case 3: gen_uxtb(tmp); break;
9494 store_reg(s, rd, tmp);
9496 case 4: case 5: case 0xc: case 0xd:
9498 addr = load_reg(s, 13);
9499 if (insn & (1 << 8))
9503 for (i = 0; i < 8; i++) {
9504 if (insn & (1 << i))
9507 if ((insn & (1 << 11)) == 0) {
9508 tcg_gen_addi_i32(addr, addr, -offset);
9510 for (i = 0; i < 8; i++) {
9511 if (insn & (1 << i)) {
9512 if (insn & (1 << 11)) {
9514 tmp = gen_ld32(addr, IS_USER(s));
9515 store_reg(s, i, tmp);
9518 tmp = load_reg(s, i);
9519 gen_st32(tmp, addr, IS_USER(s));
9521 /* advance to the next address. */
9522 tcg_gen_addi_i32(addr, addr, 4);
9526 if (insn & (1 << 8)) {
9527 if (insn & (1 << 11)) {
9529 tmp = gen_ld32(addr, IS_USER(s));
9530 /* don't set the pc until the rest of the instruction has completed. */
9534 tmp = load_reg(s, 14);
9535 gen_st32(tmp, addr, IS_USER(s));
9537 tcg_gen_addi_i32(addr, addr, 4);
9539 if ((insn & (1 << 11)) == 0) {
9540 tcg_gen_addi_i32(addr, addr, -offset);
9542 /* write back the new stack pointer */
9543 store_reg(s, 13, addr);
9544 /* set the new PC value */
9545 if ((insn & 0x0900) == 0x0900) {
9546 store_reg_from_load(env, s, 15, tmp);
9550 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9552 tmp = load_reg(s, rm);
9553 s->condlabel = gen_new_label();
9555 if (insn & (1 << 11))
9556 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9558 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9559 tcg_temp_free_i32(tmp);
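/* Assemble the branch offset i:imm5:'0' from insn bit 9 and bits 7:3 (a 0-126 byte forward branch). */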
9560 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9561 val = (uint32_t)s->pc + 2;
9566 case 15: /* IT, nop-hint. */
9567 if ((insn & 0xf) == 0) {
9568 gen_nop_hint(s, (insn >> 4) & 0xf);
9572 s->condexec_cond = (insn >> 4) & 0xe;
9573 s->condexec_mask = insn & 0x1f;
9574 /* No actual code generated for this insn, just setup state. */
9577 case 0xe: /* bkpt */
9579 gen_exception_insn(s, 2, EXCP_BKPT);
9584 rn = (insn >> 3) & 0x7;
9586 tmp = load_reg(s, rn);
9587 switch ((insn >> 6) & 3) {
9588 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9589 case 1: gen_rev16(tmp); break;
9590 case 3: gen_revsh(tmp); break;
9591 default: goto illegal_op;
9593 store_reg(s, rd, tmp);
9597 switch ((insn >> 5) & 7) {
9601 if (((insn >> 3) & 1) != s->bswap_code) {
9602 /* Dynamic endianness switching not implemented. */
9613 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9616 addr = tcg_const_i32(19);
9617 gen_helper_v7m_msr(cpu_env, addr, tmp);
9618 tcg_temp_free_i32(addr);
9622 addr = tcg_const_i32(16);
9623 gen_helper_v7m_msr(cpu_env, addr, tmp);
9624 tcg_temp_free_i32(addr);
9626 tcg_temp_free_i32(tmp);
9629 if (insn & (1 << 4)) {
9630 shift = CPSR_A | CPSR_I | CPSR_F;
9634 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9649 /* load/store multiple */
9651 TCGV_UNUSED(loaded_var);
9652 rn = (insn >> 8) & 0x7;
9653 addr = load_reg(s, rn);
9654 for (i = 0; i < 8; i++) {
9655 if (insn & (1 << i)) {
9656 if (insn & (1 << 11)) {
9658 tmp = gen_ld32(addr, IS_USER(s));
9662 store_reg(s, i, tmp);
9666 tmp = load_reg(s, i);
9667 gen_st32(tmp, addr, IS_USER(s));
9669 /* advance to the next address */
9670 tcg_gen_addi_i32(addr, addr, 4);
9673 if ((insn & (1 << rn)) == 0) {
9674 /* base reg not in list: base register writeback */
9675 store_reg(s, rn, addr);
9677 /* base reg in list: if load, complete it now */
9678 if (insn & (1 << 11)) {
9679 store_reg(s, rn, loaded_var);
9681 tcg_temp_free_i32(addr);
9686 /* conditional branch or swi */
9687 cond = (insn >> 8) & 0xf;
9693 gen_set_pc_im(s->pc);
9694 s->is_jmp = DISAS_SWI;
9697 /* generate a conditional jump to next instruction */
9698 s->condlabel = gen_new_label();
9699 gen_test_cc(cond ^ 1, s->condlabel);
9702 /* jump to the offset */
9703 val = (uint32_t)s->pc + 2;
9704 offset = ((int32_t)insn << 24) >> 24;
9710 if (insn & (1 << 11)) {
9711 if (disas_thumb2_insn(env, s, insn))
9715 /* unconditional branch */
9716 val = (uint32_t)s->pc;
9717 offset = ((int32_t)insn << 21) >> 21;
9718 val += (offset << 1) + 2;
9723 if (disas_thumb2_insn(env, s, insn))
9729 gen_exception_insn(s, 4, EXCP_UDEF);
9733 gen_exception_insn(s, 2, EXCP_UDEF);
9736 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9737 basic block 'tb'. If search_pc is TRUE, also generate PC
9738 information for each intermediate instruction. */
9739 static inline void gen_intermediate_code_internal(CPUARMState *env,
9740 TranslationBlock *tb,
9743 DisasContext dc1, *dc = &dc1;
9745 uint16_t *gen_opc_end;
9747 target_ulong pc_start;
9748 uint32_t next_page_start;
9752 /* generate intermediate code */
9757 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9759 dc->is_jmp = DISAS_NEXT;
9761 dc->singlestep_enabled = env->singlestep_enabled;
9763 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9764 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9765 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9766 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9767 #if !defined(CONFIG_USER_ONLY)
9768 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9770 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9771 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9772 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9773 cpu_F0s = tcg_temp_new_i32();
9774 cpu_F1s = tcg_temp_new_i32();
9775 cpu_F0d = tcg_temp_new_i64();
9776 cpu_F1d = tcg_temp_new_i64();
9779 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9780 cpu_M0 = tcg_temp_new_i64();
9781 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9784 max_insns = tb->cflags & CF_COUNT_MASK;
9786 max_insns = CF_COUNT_MASK;
9790 tcg_clear_temp_count();
9792 /* A note on handling of the condexec (IT) bits:
9794 * We want to avoid the overhead of having to write the updated condexec
9795 * bits back to the CPUARMState for every instruction in an IT block. So:
9796 * (1) if the condexec bits are not already zero then we write
9797 * zero back into the CPUARMState now. This avoids complications trying
9798 * to do it at the end of the block. (For example if we don't do this
9799 * it's hard to identify whether we can safely skip writing condexec
9800 * at the end of the TB, which we definitely want to do for the case
9801 * where a TB doesn't do anything with the IT state at all.)
9802 * (2) if we are going to leave the TB then we call gen_set_condexec()
9803 * which will write the correct value into CPUARMState if zero is wrong.
9804 * This is done both for leaving the TB at the end, and for leaving
9805 * it because of an exception we know will happen, which is done in
9806 * gen_exception_insn(). The latter is necessary because we need to
9807 * leave the TB with the PC/IT state just prior to execution of the
9808 * instruction which caused the exception.
9809 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9810 * then the CPUARMState will be wrong and we need to reset it.
9811 * This is handled in the same way as restoration of the
9812 * PC in these situations: we will be called again with search_pc=1
9813 * and generate a mapping of the condexec bits for each PC in
9814 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9815 * this to restore the condexec bits.
9817 * Note that there are no instructions which can read the condexec
9818 * bits, and none which can write non-static values to them, so
9819 * we don't need to care about whether CPUARMState is correct in the middle of a TB. */
9823 /* Reset the conditional execution bits immediately. This avoids
9824 complications trying to do it at the end of the block. */
9825 if (dc->condexec_mask || dc->condexec_cond)
9827 TCGv tmp = tcg_temp_new_i32();
9828 tcg_gen_movi_i32(tmp, 0);
9829 store_cpu_field(tmp, condexec_bits);
9832 #ifdef CONFIG_USER_ONLY
9833 /* Intercept jump to the magic kernel page. */
9834 if (dc->pc >= 0xffff0000) {
9835 /* We always get here via a jump, so we know we are not in a
9836 conditional execution block. */
9837 gen_exception(EXCP_KERNEL_TRAP);
9838 dc->is_jmp = DISAS_UPDATE;
9842 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9843 /* We always get here via a jump, so we know we are not in a
9844 conditional execution block. */
9845 gen_exception(EXCP_EXCEPTION_EXIT);
9846 dc->is_jmp = DISAS_UPDATE;
9851 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9852 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9853 if (bp->pc == dc->pc) {
9854 gen_exception_insn(dc, 0, EXCP_DEBUG);
9855 /* Advance PC so that clearing the breakpoint will
9856 invalidate this TB. */
9858 goto done_generating;
9864 j = gen_opc_ptr - gen_opc_buf;
9868 gen_opc_instr_start[lj++] = 0;
9870 gen_opc_pc[lj] = dc->pc;
9871 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9872 gen_opc_instr_start[lj] = 1;
9873 gen_opc_icount[lj] = num_insns;
9876 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9879 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9880 tcg_gen_debug_insn_start(dc->pc);
9884 disas_thumb_insn(env, dc);
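/* Advance the IT block state: shift the next mask bit into the low bit of the condition; once the mask is exhausted the IT block is finished. */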
9885 if (dc->condexec_mask) {
9886 dc->condexec_cond = (dc->condexec_cond & 0xe)
9887 | ((dc->condexec_mask >> 4) & 1);
9888 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9889 if (dc->condexec_mask == 0) {
9890 dc->condexec_cond = 0;
9894 disas_arm_insn(env, dc);
9897 if (dc->condjmp && !dc->is_jmp) {
9898 gen_set_label(dc->condlabel);
9902 if (tcg_check_temp_count()) {
9903 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9906 /* Translation stops when a conditional branch is encountered.
9907 * Otherwise the subsequent code could get translated several times.
9908 * Also stop translation when a page boundary is reached. This
9909 * ensures prefetch aborts occur at the right place. */
9911 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9912 !env->singlestep_enabled &&
9914 dc->pc < next_page_start &&
9915 num_insns < max_insns);
9917 if (tb->cflags & CF_LAST_IO) {
9919 /* FIXME: This can theoretically happen with self-modifying code. */
9921 cpu_abort(env, "IO on conditional branch instruction");
9926 /* At this stage dc->condjmp will only be set when the skipped
9927 instruction was a conditional branch or trap, and the PC has
9928 already been written. */
9929 if (unlikely(env->singlestep_enabled)) {
9930 /* Make sure the pc is updated, and raise a debug exception. */
9932 gen_set_condexec(dc);
9933 if (dc->is_jmp == DISAS_SWI) {
9934 gen_exception(EXCP_SWI);
9936 gen_exception(EXCP_DEBUG);
9938 gen_set_label(dc->condlabel);
9940 if (dc->condjmp || !dc->is_jmp) {
9941 gen_set_pc_im(dc->pc);
9944 gen_set_condexec(dc);
9945 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9946 gen_exception(EXCP_SWI);
9948 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
9950 gen_exception(EXCP_DEBUG);
9953 /* While branches must always occur at the end of an IT block,
9954 there are a few other things that can cause us to terminate
9955 the TB in the middle of an IT block:
9956 - Exception generating instructions (bkpt, swi, undefined).
9958 - Hardware watchpoints.
9959 Hardware breakpoints have already been handled and skip this code. */
9961 gen_set_condexec(dc);
9962 switch (dc->is_jmp) {
9964 gen_goto_tb(dc, 1, dc->pc);
9969 /* indicate that the hash table must be used to find the next TB */
9973 /* nothing more to generate */
9979 gen_exception(EXCP_SWI);
9983 gen_set_label(dc->condlabel);
9984 gen_set_condexec(dc);
9985 gen_goto_tb(dc, 1, dc->pc);
9991 gen_icount_end(tb, num_insns);
9992 *gen_opc_ptr = INDEX_op_end;
9995 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9996 qemu_log("----------------\n");
9997 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9998 log_target_disas(pc_start, dc->pc - pc_start,
9999 dc->thumb | (dc->bswap_code << 1));
10004 j = gen_opc_ptr - gen_opc_buf;
10007 gen_opc_instr_start[lj++] = 0;
10009 tb->size = dc->pc - pc_start;
10010 tb->icount = num_insns;
10014 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
10016 gen_intermediate_code_internal(env, tb, 0);
10019 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
10021 gen_intermediate_code_internal(env, tb, 1);
10024 static const char *cpu_mode_names[16] = {
10025 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10026 "???", "???", "???", "und", "???", "???", "???", "sys"
10029 void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
10039 /* ??? This assumes float64 and double have the same layout.
10040 Oh well, it's only debug dumps. */
10048 for (i = 0; i < 16; i++) {
10049 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10051 cpu_fprintf(f, "\n");
10053 cpu_fprintf(f, " ");
10055 psr = cpsr_read(env);
10056 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10058 psr & (1 << 31) ? 'N' : '-',
10059 psr & (1 << 30) ? 'Z' : '-',
10060 psr & (1 << 29) ? 'C' : '-',
10061 psr & (1 << 28) ? 'V' : '-',
10062 psr & CPSR_T ? 'T' : 'A',
10063 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10066 for (i = 0; i < 16; i++) {
10067 d.d = env->vfp.regs[i];
10071 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
10072 i * 2, (int)s0.i, s0.s,
10073 i * 2 + 1, (int)s1.i, s1.s,
10074 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
10077 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10081 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
10083 env->regs[15] = gen_opc_pc[pc_pos];
10084 env->condexec_bits = gen_opc_condexec_bits[pc_pos];