4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
48 /* internal defines */
49 typedef struct DisasContext {
52 /* Nonzero if this instruction has been conditionally skipped. */
54 /* The label that will be jumped to when the instruction is skipped. */
56 /* Thumb-2 condtional execution bits. */
59 struct TranslationBlock *tb;
60 int singlestep_enabled;
63 #if !defined(CONFIG_USER_ONLY)
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
73 #if defined(CONFIG_USER_ONLY)
76 #define IS_USER(s) (s->user)
79 /* These instructions trap after executing, so defer them until after the
80 conditional executions state has been updated. */
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_exclusive_addr;
89 static TCGv_i32 cpu_exclusive_val;
90 static TCGv_i32 cpu_exclusive_high;
91 #ifdef CONFIG_USER_ONLY
92 static TCGv_i32 cpu_exclusive_test;
93 static TCGv_i32 cpu_exclusive_info;
96 /* FIXME: These should be removed. */
97 static TCGv cpu_F0s, cpu_F1s;
98 static TCGv_i64 cpu_F0d, cpu_F1d;
100 #include "gen-icount.h"
102 static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106 /* initialize TCG globals. */
107 void arm_translate_init(void)
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUARMState, regs[i]),
118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUARMState, exclusive_val), "exclusive_val");
122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUARMState, exclusive_high), "exclusive_high");
124 #ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUARMState, exclusive_test), "exclusive_test");
127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUARMState, exclusive_info), "exclusive_info");
135 static inline TCGv load_cpu_offset(int offset)
137 TCGv tmp = tcg_temp_new_i32();
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
142 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
144 static inline void store_cpu_offset(TCGv var, int offset)
146 tcg_gen_st_i32(var, cpu_env, offset);
147 tcg_temp_free_i32(var);
150 #define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUARMState, name))
153 /* Set a variable to the value of a CPU register. */
154 static void load_reg_var(DisasContext *s, TCGv var, int reg)
158 /* normaly, since we updated PC, we need only to add one insn */
160 addr = (long)s->pc + 2;
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
165 tcg_gen_mov_i32(var, cpu_R[reg]);
169 /* Create a new temporary and set it to the value of a CPU register. */
170 static inline TCGv load_reg(DisasContext *s, int reg)
172 TCGv tmp = tcg_temp_new_i32();
173 load_reg_var(s, tmp, reg);
177 /* Set a CPU register. The source must be a temporary and will be
179 static void store_reg(DisasContext *s, int reg, TCGv var)
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
185 tcg_gen_mov_i32(cpu_R[reg], var);
186 tcg_temp_free_i32(var);
189 /* Value extensions. */
190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
192 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
195 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
199 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
205 /* Set NZCV flags from the high 4 bits of var. */
206 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
208 static void gen_exception(int excp)
210 TCGv tmp = tcg_temp_new_i32();
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(tmp);
213 tcg_temp_free_i32(tmp);
216 static void gen_smul_dual(TCGv a, TCGv b)
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
223 tcg_temp_free_i32(tmp2);
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
228 tcg_temp_free_i32(tmp1);
231 /* Byteswap each halfword. */
232 static void gen_rev16(TCGv var)
234 TCGv tmp = tcg_temp_new_i32();
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
240 tcg_temp_free_i32(tmp);
243 /* Byteswap low halfword and sign extend. */
244 static void gen_revsh(TCGv var)
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
251 /* Unsigned bitfield extract. */
252 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
259 /* Signed bitfield extract. */
260 static void gen_sbfx(TCGv var, int shift, int width)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
274 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
275 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
277 tcg_gen_andi_i32(val, val, mask);
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
280 tcg_gen_or_i32(dest, base, val);
283 /* Return (b << 32) + a. Mark inputs as dead */
284 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
288 tcg_gen_extu_i32_i64(tmp64, b);
289 tcg_temp_free_i32(b);
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
293 tcg_temp_free_i64(tmp64);
297 /* Return (b << 32) - a. Mark inputs as dead. */
298 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
302 tcg_gen_extu_i32_i64(tmp64, b);
303 tcg_temp_free_i32(b);
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
307 tcg_temp_free_i64(tmp64);
311 /* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
313 /* 32x32->64 multiply. Marks inputs as dead. */
314 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
319 tcg_gen_extu_i32_i64(tmp1, a);
320 tcg_temp_free_i32(a);
321 tcg_gen_extu_i32_i64(tmp2, b);
322 tcg_temp_free_i32(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 tcg_temp_free_i64(tmp2);
328 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
333 tcg_gen_ext_i32_i64(tmp1, a);
334 tcg_temp_free_i32(a);
335 tcg_gen_ext_i32_i64(tmp2, b);
336 tcg_temp_free_i32(b);
337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
338 tcg_temp_free_i64(tmp2);
342 /* Swap low and high halfwords. */
343 static void gen_swap_half(TCGv var)
345 TCGv tmp = tcg_temp_new_i32();
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
349 tcg_temp_free_i32(tmp);
352 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
356 t0 = (t0 + t1) ^ tmp;
359 static void gen_add16(TCGv t0, TCGv t1)
361 TCGv tmp = tcg_temp_new_i32();
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
372 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))
374 /* Set CF to the top bit of var. */
375 static void gen_set_CF_bit31(TCGv var)
377 TCGv tmp = tcg_temp_new_i32();
378 tcg_gen_shri_i32(tmp, var, 31);
380 tcg_temp_free_i32(tmp);
383 /* Set N and Z flags from var. */
384 static inline void gen_logic_CC(TCGv var)
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
391 static void gen_adc(TCGv t0, TCGv t1)
394 tcg_gen_add_i32(t0, t0, t1);
395 tmp = load_cpu_field(CF);
396 tcg_gen_add_i32(t0, t0, tmp);
397 tcg_temp_free_i32(tmp);
400 /* dest = T0 + T1 + CF. */
401 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
407 tcg_temp_free_i32(tmp);
410 /* dest = T0 - T1 + CF - 1. */
411 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
414 tcg_gen_sub_i32(dest, t0, t1);
415 tmp = load_cpu_field(CF);
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
418 tcg_temp_free_i32(tmp);
421 /* FIXME: Implement this natively. */
422 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
424 static void shifter_out_im(TCGv var, int shift)
426 TCGv tmp = tcg_temp_new_i32();
428 tcg_gen_andi_i32(tmp, var, 1);
430 tcg_gen_shri_i32(tmp, var, shift);
432 tcg_gen_andi_i32(tmp, tmp, 1);
435 tcg_temp_free_i32(tmp);
438 /* Shift by immediate. Includes special handling for shift == 0. */
439 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
452 tcg_gen_shri_i32(var, var, 31);
455 tcg_gen_movi_i32(var, 0);
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
466 shifter_out_im(var, shift - 1);
469 tcg_gen_sari_i32(var, var, shift);
471 case 3: /* ROR/RRX */
474 shifter_out_im(var, shift - 1);
475 tcg_gen_rotri_i32(var, var, shift); break;
477 TCGv tmp = load_cpu_field(CF);
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
483 tcg_temp_free_i32(tmp);
488 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
493 case 0: gen_helper_shl_cc(var, var, shift); break;
494 case 1: gen_helper_shr_cc(var, var, shift); break;
495 case 2: gen_helper_sar_cc(var, var, shift); break;
496 case 3: gen_helper_ror_cc(var, var, shift); break;
500 case 0: gen_helper_shl(var, var, shift); break;
501 case 1: gen_helper_shr(var, var, shift); break;
502 case 2: gen_helper_sar(var, var, shift); break;
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
507 tcg_temp_free_i32(shift);
510 #define PAS_OP(pfx) \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
519 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
524 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
526 tmp = tcg_temp_new_ptr();
527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
529 tcg_temp_free_ptr(tmp);
532 tmp = tcg_temp_new_ptr();
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
535 tcg_temp_free_ptr(tmp);
537 #undef gen_pas_helper
538 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
551 #undef gen_pas_helper
556 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
557 #define PAS_OP(pfx) \
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
566 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
571 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
573 tmp = tcg_temp_new_ptr();
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
576 tcg_temp_free_ptr(tmp);
579 tmp = tcg_temp_new_ptr();
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
582 tcg_temp_free_ptr(tmp);
584 #undef gen_pas_helper
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 #undef gen_pas_helper
603 static void gen_test_cc(int cc, int label)
611 tmp = load_cpu_field(ZF);
612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
615 tmp = load_cpu_field(ZF);
616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
619 tmp = load_cpu_field(CF);
620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
623 tmp = load_cpu_field(CF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
627 tmp = load_cpu_field(NF);
628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
631 tmp = load_cpu_field(NF);
632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
635 tmp = load_cpu_field(VF);
636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
639 tmp = load_cpu_field(VF);
640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
646 tcg_temp_free_i32(tmp);
647 tmp = load_cpu_field(ZF);
648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
654 tcg_temp_free_i32(tmp);
655 tmp = load_cpu_field(ZF);
656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
660 tmp2 = load_cpu_field(NF);
661 tcg_gen_xor_i32(tmp, tmp, tmp2);
662 tcg_temp_free_i32(tmp2);
663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
667 tmp2 = load_cpu_field(NF);
668 tcg_gen_xor_i32(tmp, tmp, tmp2);
669 tcg_temp_free_i32(tmp2);
670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
674 tmp = load_cpu_field(ZF);
675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
676 tcg_temp_free_i32(tmp);
677 tmp = load_cpu_field(VF);
678 tmp2 = load_cpu_field(NF);
679 tcg_gen_xor_i32(tmp, tmp, tmp2);
680 tcg_temp_free_i32(tmp2);
681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
684 case 13: /* le: Z || N != V */
685 tmp = load_cpu_field(ZF);
686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
687 tcg_temp_free_i32(tmp);
688 tmp = load_cpu_field(VF);
689 tmp2 = load_cpu_field(NF);
690 tcg_gen_xor_i32(tmp, tmp, tmp2);
691 tcg_temp_free_i32(tmp2);
692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
698 tcg_temp_free_i32(tmp);
701 static const uint8_t table_logic_cc[16] = {
720 /* Set PC and Thumb state from an immediate address. */
721 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
725 s->is_jmp = DISAS_UPDATE;
726 if (s->thumb != (addr & 1)) {
727 tmp = tcg_temp_new_i32();
728 tcg_gen_movi_i32(tmp, addr & 1);
729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
730 tcg_temp_free_i32(tmp);
732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
735 /* Set PC and Thumb state from var. var is marked as dead. */
736 static inline void gen_bx(DisasContext *s, TCGv var)
738 s->is_jmp = DISAS_UPDATE;
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
744 /* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
747 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
750 if (reg == 15 && ENABLE_ARCH_7) {
753 store_reg(s, reg, var);
757 /* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
761 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
764 if (reg == 15 && ENABLE_ARCH_5) {
767 store_reg(s, reg, var);
771 static inline TCGv gen_ld8s(TCGv addr, int index)
773 TCGv tmp = tcg_temp_new_i32();
774 tcg_gen_qemu_ld8s(tmp, addr, index);
777 static inline TCGv gen_ld8u(TCGv addr, int index)
779 TCGv tmp = tcg_temp_new_i32();
780 tcg_gen_qemu_ld8u(tmp, addr, index);
783 static inline TCGv gen_ld16s(TCGv addr, int index)
785 TCGv tmp = tcg_temp_new_i32();
786 tcg_gen_qemu_ld16s(tmp, addr, index);
789 static inline TCGv gen_ld16u(TCGv addr, int index)
791 TCGv tmp = tcg_temp_new_i32();
792 tcg_gen_qemu_ld16u(tmp, addr, index);
795 static inline TCGv gen_ld32(TCGv addr, int index)
797 TCGv tmp = tcg_temp_new_i32();
798 tcg_gen_qemu_ld32u(tmp, addr, index);
801 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
807 static inline void gen_st8(TCGv val, TCGv addr, int index)
809 tcg_gen_qemu_st8(val, addr, index);
810 tcg_temp_free_i32(val);
812 static inline void gen_st16(TCGv val, TCGv addr, int index)
814 tcg_gen_qemu_st16(val, addr, index);
815 tcg_temp_free_i32(val);
817 static inline void gen_st32(TCGv val, TCGv addr, int index)
819 tcg_gen_qemu_st32(val, addr, index);
820 tcg_temp_free_i32(val);
822 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
828 static inline void gen_set_pc_im(uint32_t val)
830 tcg_gen_movi_i32(cpu_R[15], val);
833 /* Force a TB lookup after an instruction that changes the CPU state. */
834 static inline void gen_lookup_tb(DisasContext *s)
836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
837 s->is_jmp = DISAS_UPDATE;
840 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
843 int val, rm, shift, shiftop;
846 if (!(insn & (1 << 25))) {
849 if (!(insn & (1 << 23)))
852 tcg_gen_addi_i32(var, var, val);
856 shift = (insn >> 7) & 0x1f;
857 shiftop = (insn >> 5) & 3;
858 offset = load_reg(s, rm);
859 gen_arm_shift_im(offset, shiftop, shift, 0);
860 if (!(insn & (1 << 23)))
861 tcg_gen_sub_i32(var, var, offset);
863 tcg_gen_add_i32(var, var, offset);
864 tcg_temp_free_i32(offset);
868 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
874 if (insn & (1 << 22)) {
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
881 tcg_gen_addi_i32(var, var, val);
885 tcg_gen_addi_i32(var, var, extra);
887 offset = load_reg(s, rm);
888 if (!(insn & (1 << 23)))
889 tcg_gen_sub_i32(var, var, offset);
891 tcg_gen_add_i32(var, var, offset);
892 tcg_temp_free_i32(offset);
896 static TCGv_ptr get_fpstatus_ptr(int neon)
898 TCGv_ptr statusptr = tcg_temp_new_ptr();
901 offset = offsetof(CPUARMState, vfp.standard_fp_status);
903 offset = offsetof(CPUARMState, vfp.fp_status);
905 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
909 #define VFP_OP2(name) \
910 static inline void gen_vfp_##name(int dp) \
912 TCGv_ptr fpst = get_fpstatus_ptr(0); \
914 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
916 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
918 tcg_temp_free_ptr(fpst); \
928 static inline void gen_vfp_F1_mul(int dp)
930 /* Like gen_vfp_mul() but put result in F1 */
931 TCGv_ptr fpst = get_fpstatus_ptr(0);
933 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
935 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
937 tcg_temp_free_ptr(fpst);
940 static inline void gen_vfp_F1_neg(int dp)
942 /* Like gen_vfp_neg() but put result in F1 */
944 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
946 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
950 static inline void gen_vfp_abs(int dp)
953 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
955 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
958 static inline void gen_vfp_neg(int dp)
961 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
963 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
966 static inline void gen_vfp_sqrt(int dp)
969 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
971 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
974 static inline void gen_vfp_cmp(int dp)
977 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
979 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
982 static inline void gen_vfp_cmpe(int dp)
985 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
987 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
990 static inline void gen_vfp_F1_ld0(int dp)
993 tcg_gen_movi_i64(cpu_F1d, 0);
995 tcg_gen_movi_i32(cpu_F1s, 0);
998 #define VFP_GEN_ITOF(name) \
999 static inline void gen_vfp_##name(int dp, int neon) \
1001 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1003 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1005 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1007 tcg_temp_free_ptr(statusptr); \
1014 #define VFP_GEN_FTOI(name) \
1015 static inline void gen_vfp_##name(int dp, int neon) \
1017 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1019 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1021 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1023 tcg_temp_free_ptr(statusptr); \
1032 #define VFP_GEN_FIX(name) \
1033 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1035 TCGv tmp_shift = tcg_const_i32(shift); \
1036 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1038 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1040 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1042 tcg_temp_free_i32(tmp_shift); \
1043 tcg_temp_free_ptr(statusptr); \
1055 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1058 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1060 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1063 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1066 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1068 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1072 vfp_reg_offset (int dp, int reg)
1075 return offsetof(CPUARMState, vfp.regs[reg]);
1077 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1078 + offsetof(CPU_DoubleU, l.upper);
1080 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1081 + offsetof(CPU_DoubleU, l.lower);
1085 /* Return the offset of a 32-bit piece of a NEON register.
1086 zero is the least significant end of the register. */
1088 neon_reg_offset (int reg, int n)
1092 return vfp_reg_offset(0, sreg);
1095 static TCGv neon_load_reg(int reg, int pass)
1097 TCGv tmp = tcg_temp_new_i32();
1098 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1102 static void neon_store_reg(int reg, int pass, TCGv var)
1104 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1105 tcg_temp_free_i32(var);
1108 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1110 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1113 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1115 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1118 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1119 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1120 #define tcg_gen_st_f32 tcg_gen_st_i32
1121 #define tcg_gen_st_f64 tcg_gen_st_i64
1123 static inline void gen_mov_F0_vreg(int dp, int reg)
1126 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1128 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1131 static inline void gen_mov_F1_vreg(int dp, int reg)
1134 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1136 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1139 static inline void gen_mov_vreg_F0(int dp, int reg)
1142 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1144 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1147 #define ARM_CP_RW_BIT (1 << 20)
1149 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1151 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1154 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1156 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1159 static inline TCGv iwmmxt_load_creg(int reg)
1161 TCGv var = tcg_temp_new_i32();
1162 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1166 static inline void iwmmxt_store_creg(int reg, TCGv var)
1168 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1169 tcg_temp_free_i32(var);
1172 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1174 iwmmxt_store_reg(cpu_M0, rn);
1177 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1179 iwmmxt_load_reg(cpu_M0, rn);
1182 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1184 iwmmxt_load_reg(cpu_V1, rn);
1185 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1188 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1190 iwmmxt_load_reg(cpu_V1, rn);
1191 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1194 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1196 iwmmxt_load_reg(cpu_V1, rn);
1197 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1200 #define IWMMXT_OP(name) \
1201 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1203 iwmmxt_load_reg(cpu_V1, rn); \
1204 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1207 #define IWMMXT_OP_ENV(name) \
1208 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1210 iwmmxt_load_reg(cpu_V1, rn); \
1211 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1214 #define IWMMXT_OP_ENV_SIZE(name) \
1215 IWMMXT_OP_ENV(name##b) \
1216 IWMMXT_OP_ENV(name##w) \
1217 IWMMXT_OP_ENV(name##l)
1219 #define IWMMXT_OP_ENV1(name) \
1220 static inline void gen_op_iwmmxt_##name##_M0(void) \
1222 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1236 IWMMXT_OP_ENV_SIZE(unpackl)
1237 IWMMXT_OP_ENV_SIZE(unpackh)
1239 IWMMXT_OP_ENV1(unpacklub)
1240 IWMMXT_OP_ENV1(unpackluw)
1241 IWMMXT_OP_ENV1(unpacklul)
1242 IWMMXT_OP_ENV1(unpackhub)
1243 IWMMXT_OP_ENV1(unpackhuw)
1244 IWMMXT_OP_ENV1(unpackhul)
1245 IWMMXT_OP_ENV1(unpacklsb)
1246 IWMMXT_OP_ENV1(unpacklsw)
1247 IWMMXT_OP_ENV1(unpacklsl)
1248 IWMMXT_OP_ENV1(unpackhsb)
1249 IWMMXT_OP_ENV1(unpackhsw)
1250 IWMMXT_OP_ENV1(unpackhsl)
1252 IWMMXT_OP_ENV_SIZE(cmpeq)
1253 IWMMXT_OP_ENV_SIZE(cmpgtu)
1254 IWMMXT_OP_ENV_SIZE(cmpgts)
1256 IWMMXT_OP_ENV_SIZE(mins)
1257 IWMMXT_OP_ENV_SIZE(minu)
1258 IWMMXT_OP_ENV_SIZE(maxs)
1259 IWMMXT_OP_ENV_SIZE(maxu)
1261 IWMMXT_OP_ENV_SIZE(subn)
1262 IWMMXT_OP_ENV_SIZE(addn)
1263 IWMMXT_OP_ENV_SIZE(subu)
1264 IWMMXT_OP_ENV_SIZE(addu)
1265 IWMMXT_OP_ENV_SIZE(subs)
1266 IWMMXT_OP_ENV_SIZE(adds)
1268 IWMMXT_OP_ENV(avgb0)
1269 IWMMXT_OP_ENV(avgb1)
1270 IWMMXT_OP_ENV(avgw0)
1271 IWMMXT_OP_ENV(avgw1)
1275 IWMMXT_OP_ENV(packuw)
1276 IWMMXT_OP_ENV(packul)
1277 IWMMXT_OP_ENV(packuq)
1278 IWMMXT_OP_ENV(packsw)
1279 IWMMXT_OP_ENV(packsl)
1280 IWMMXT_OP_ENV(packsq)
1282 static void gen_op_iwmmxt_set_mup(void)
1285 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1286 tcg_gen_ori_i32(tmp, tmp, 2);
1287 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1290 static void gen_op_iwmmxt_set_cup(void)
1293 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1294 tcg_gen_ori_i32(tmp, tmp, 1);
1295 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1298 static void gen_op_iwmmxt_setpsr_nz(void)
1300 TCGv tmp = tcg_temp_new_i32();
1301 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1302 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1305 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1307 iwmmxt_load_reg(cpu_V1, rn);
1308 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1309 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1312 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1318 rd = (insn >> 16) & 0xf;
1319 tmp = load_reg(s, rd);
1321 offset = (insn & 0xff) << ((insn >> 7) & 2);
1322 if (insn & (1 << 24)) {
1324 if (insn & (1 << 23))
1325 tcg_gen_addi_i32(tmp, tmp, offset);
1327 tcg_gen_addi_i32(tmp, tmp, -offset);
1328 tcg_gen_mov_i32(dest, tmp);
1329 if (insn & (1 << 21))
1330 store_reg(s, rd, tmp);
1332 tcg_temp_free_i32(tmp);
1333 } else if (insn & (1 << 21)) {
1335 tcg_gen_mov_i32(dest, tmp);
1336 if (insn & (1 << 23))
1337 tcg_gen_addi_i32(tmp, tmp, offset);
1339 tcg_gen_addi_i32(tmp, tmp, -offset);
1340 store_reg(s, rd, tmp);
1341 } else if (!(insn & (1 << 23)))
1346 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1348 int rd = (insn >> 0) & 0xf;
1351 if (insn & (1 << 8)) {
1352 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1355 tmp = iwmmxt_load_creg(rd);
1358 tmp = tcg_temp_new_i32();
1359 iwmmxt_load_reg(cpu_V0, rd);
1360 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1362 tcg_gen_andi_i32(tmp, tmp, mask);
1363 tcg_gen_mov_i32(dest, tmp);
1364 tcg_temp_free_i32(tmp);
1368 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1369 (ie. an undefined instruction). */
1370 static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
1373 int rdhi, rdlo, rd0, rd1, i;
1375 TCGv tmp, tmp2, tmp3;
1377 if ((insn & 0x0e000e00) == 0x0c000000) {
1378 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1380 rdlo = (insn >> 12) & 0xf;
1381 rdhi = (insn >> 16) & 0xf;
1382 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1383 iwmmxt_load_reg(cpu_V0, wrd);
1384 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1385 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1386 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1387 } else { /* TMCRR */
1388 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1389 iwmmxt_store_reg(cpu_V0, wrd);
1390 gen_op_iwmmxt_set_mup();
1395 wrd = (insn >> 12) & 0xf;
1396 addr = tcg_temp_new_i32();
1397 if (gen_iwmmxt_address(s, insn, addr)) {
1398 tcg_temp_free_i32(addr);
1401 if (insn & ARM_CP_RW_BIT) {
1402 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1403 tmp = tcg_temp_new_i32();
1404 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1405 iwmmxt_store_creg(wrd, tmp);
1408 if (insn & (1 << 8)) {
1409 if (insn & (1 << 22)) { /* WLDRD */
1410 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1412 } else { /* WLDRW wRd */
1413 tmp = gen_ld32(addr, IS_USER(s));
1416 if (insn & (1 << 22)) { /* WLDRH */
1417 tmp = gen_ld16u(addr, IS_USER(s));
1418 } else { /* WLDRB */
1419 tmp = gen_ld8u(addr, IS_USER(s));
1423 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1424 tcg_temp_free_i32(tmp);
1426 gen_op_iwmmxt_movq_wRn_M0(wrd);
1429 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1430 tmp = iwmmxt_load_creg(wrd);
1431 gen_st32(tmp, addr, IS_USER(s));
1433 gen_op_iwmmxt_movq_M0_wRn(wrd);
1434 tmp = tcg_temp_new_i32();
1435 if (insn & (1 << 8)) {
1436 if (insn & (1 << 22)) { /* WSTRD */
1437 tcg_temp_free_i32(tmp);
1438 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1439 } else { /* WSTRW wRd */
1440 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1441 gen_st32(tmp, addr, IS_USER(s));
1444 if (insn & (1 << 22)) { /* WSTRH */
1445 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1446 gen_st16(tmp, addr, IS_USER(s));
1447 } else { /* WSTRB */
1448 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1449 gen_st8(tmp, addr, IS_USER(s));
1454 tcg_temp_free_i32(addr);
1458 if ((insn & 0x0f000000) != 0x0e000000)
1461 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1462 case 0x000: /* WOR */
1463 wrd = (insn >> 12) & 0xf;
1464 rd0 = (insn >> 0) & 0xf;
1465 rd1 = (insn >> 16) & 0xf;
1466 gen_op_iwmmxt_movq_M0_wRn(rd0);
1467 gen_op_iwmmxt_orq_M0_wRn(rd1);
1468 gen_op_iwmmxt_setpsr_nz();
1469 gen_op_iwmmxt_movq_wRn_M0(wrd);
1470 gen_op_iwmmxt_set_mup();
1471 gen_op_iwmmxt_set_cup();
1473 case 0x011: /* TMCR */
1476 rd = (insn >> 12) & 0xf;
1477 wrd = (insn >> 16) & 0xf;
1479 case ARM_IWMMXT_wCID:
1480 case ARM_IWMMXT_wCASF:
1482 case ARM_IWMMXT_wCon:
1483 gen_op_iwmmxt_set_cup();
1485 case ARM_IWMMXT_wCSSF:
1486 tmp = iwmmxt_load_creg(wrd);
1487 tmp2 = load_reg(s, rd);
1488 tcg_gen_andc_i32(tmp, tmp, tmp2);
1489 tcg_temp_free_i32(tmp2);
1490 iwmmxt_store_creg(wrd, tmp);
1492 case ARM_IWMMXT_wCGR0:
1493 case ARM_IWMMXT_wCGR1:
1494 case ARM_IWMMXT_wCGR2:
1495 case ARM_IWMMXT_wCGR3:
1496 gen_op_iwmmxt_set_cup();
1497 tmp = load_reg(s, rd);
1498 iwmmxt_store_creg(wrd, tmp);
1504 case 0x100: /* WXOR */
1505 wrd = (insn >> 12) & 0xf;
1506 rd0 = (insn >> 0) & 0xf;
1507 rd1 = (insn >> 16) & 0xf;
1508 gen_op_iwmmxt_movq_M0_wRn(rd0);
1509 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1510 gen_op_iwmmxt_setpsr_nz();
1511 gen_op_iwmmxt_movq_wRn_M0(wrd);
1512 gen_op_iwmmxt_set_mup();
1513 gen_op_iwmmxt_set_cup();
1515 case 0x111: /* TMRC */
1518 rd = (insn >> 12) & 0xf;
1519 wrd = (insn >> 16) & 0xf;
1520 tmp = iwmmxt_load_creg(wrd);
1521 store_reg(s, rd, tmp);
1523 case 0x300: /* WANDN */
1524 wrd = (insn >> 12) & 0xf;
1525 rd0 = (insn >> 0) & 0xf;
1526 rd1 = (insn >> 16) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0);
1528 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1529 gen_op_iwmmxt_andq_M0_wRn(rd1);
1530 gen_op_iwmmxt_setpsr_nz();
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1535 case 0x200: /* WAND */
1536 wrd = (insn >> 12) & 0xf;
1537 rd0 = (insn >> 0) & 0xf;
1538 rd1 = (insn >> 16) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0);
1540 gen_op_iwmmxt_andq_M0_wRn(rd1);
1541 gen_op_iwmmxt_setpsr_nz();
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1546 case 0x810: case 0xa10: /* WMADD */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 0) & 0xf;
1549 rd1 = (insn >> 16) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 if (insn & (1 << 21))
1552 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1554 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1555 gen_op_iwmmxt_movq_wRn_M0(wrd);
1556 gen_op_iwmmxt_set_mup();
1558 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1559 wrd = (insn >> 12) & 0xf;
1560 rd0 = (insn >> 16) & 0xf;
1561 rd1 = (insn >> 0) & 0xf;
1562 gen_op_iwmmxt_movq_M0_wRn(rd0);
1563 switch ((insn >> 22) & 3) {
1565 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1568 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1571 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1576 gen_op_iwmmxt_movq_wRn_M0(wrd);
1577 gen_op_iwmmxt_set_mup();
1578 gen_op_iwmmxt_set_cup();
1580 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1581 wrd = (insn >> 12) & 0xf;
1582 rd0 = (insn >> 16) & 0xf;
1583 rd1 = (insn >> 0) & 0xf;
1584 gen_op_iwmmxt_movq_M0_wRn(rd0);
1585 switch ((insn >> 22) & 3) {
1587 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1590 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1593 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 gen_op_iwmmxt_set_cup();
1602 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1603 wrd = (insn >> 12) & 0xf;
1604 rd0 = (insn >> 16) & 0xf;
1605 rd1 = (insn >> 0) & 0xf;
1606 gen_op_iwmmxt_movq_M0_wRn(rd0);
1607 if (insn & (1 << 22))
1608 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1610 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1611 if (!(insn & (1 << 20)))
1612 gen_op_iwmmxt_addl_M0_wRn(wrd);
1613 gen_op_iwmmxt_movq_wRn_M0(wrd);
1614 gen_op_iwmmxt_set_mup();
1616 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1617 wrd = (insn >> 12) & 0xf;
1618 rd0 = (insn >> 16) & 0xf;
1619 rd1 = (insn >> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0);
1621 if (insn & (1 << 21)) {
1622 if (insn & (1 << 20))
1623 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1625 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1627 if (insn & (1 << 20))
1628 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1630 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1632 gen_op_iwmmxt_movq_wRn_M0(wrd);
1633 gen_op_iwmmxt_set_mup();
1635 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 16) & 0xf;
1638 rd1 = (insn >> 0) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
1640 if (insn & (1 << 21))
1641 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1643 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1644 if (!(insn & (1 << 20))) {
1645 iwmmxt_load_reg(cpu_V1, wrd);
1646 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1648 gen_op_iwmmxt_movq_wRn_M0(wrd);
1649 gen_op_iwmmxt_set_mup();
1651 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1652 wrd = (insn >> 12) & 0xf;
1653 rd0 = (insn >> 16) & 0xf;
1654 rd1 = (insn >> 0) & 0xf;
1655 gen_op_iwmmxt_movq_M0_wRn(rd0);
1656 switch ((insn >> 22) & 3) {
1658 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1661 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1664 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 gen_op_iwmmxt_set_cup();
1673 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1674 wrd = (insn >> 12) & 0xf;
1675 rd0 = (insn >> 16) & 0xf;
1676 rd1 = (insn >> 0) & 0xf;
1677 gen_op_iwmmxt_movq_M0_wRn(rd0);
1678 if (insn & (1 << 22)) {
1679 if (insn & (1 << 20))
1680 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1682 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1684 if (insn & (1 << 20))
1685 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1687 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1693 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
1698 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1699 tcg_gen_andi_i32(tmp, tmp, 7);
1700 iwmmxt_load_reg(cpu_V1, rd1);
1701 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1702 tcg_temp_free_i32(tmp);
1703 gen_op_iwmmxt_movq_wRn_M0(wrd);
1704 gen_op_iwmmxt_set_mup();
1706 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1707 if (((insn >> 6) & 3) == 3)
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 tmp = load_reg(s, rd);
1712 gen_op_iwmmxt_movq_M0_wRn(wrd);
1713 switch ((insn >> 6) & 3) {
1715 tmp2 = tcg_const_i32(0xff);
1716 tmp3 = tcg_const_i32((insn & 7) << 3);
1719 tmp2 = tcg_const_i32(0xffff);
1720 tmp3 = tcg_const_i32((insn & 3) << 4);
1723 tmp2 = tcg_const_i32(0xffffffff);
1724 tmp3 = tcg_const_i32((insn & 1) << 5);
1730 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1731 tcg_temp_free(tmp3);
1732 tcg_temp_free(tmp2);
1733 tcg_temp_free_i32(tmp);
1734 gen_op_iwmmxt_movq_wRn_M0(wrd);
1735 gen_op_iwmmxt_set_mup();
1737 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1738 rd = (insn >> 12) & 0xf;
1739 wrd = (insn >> 16) & 0xf;
1740 if (rd == 15 || ((insn >> 22) & 3) == 3)
1742 gen_op_iwmmxt_movq_M0_wRn(wrd);
1743 tmp = tcg_temp_new_i32();
1744 switch ((insn >> 22) & 3) {
1746 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1747 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1749 tcg_gen_ext8s_i32(tmp, tmp);
1751 tcg_gen_andi_i32(tmp, tmp, 0xff);
1755 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1756 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1758 tcg_gen_ext16s_i32(tmp, tmp);
1760 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1764 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1765 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1768 store_reg(s, rd, tmp);
1770 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1771 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1773 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1774 switch ((insn >> 22) & 3) {
1776 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1779 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1782 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1785 tcg_gen_shli_i32(tmp, tmp, 28);
1787 tcg_temp_free_i32(tmp);
1789 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1790 if (((insn >> 6) & 3) == 3)
1792 rd = (insn >> 12) & 0xf;
1793 wrd = (insn >> 16) & 0xf;
1794 tmp = load_reg(s, rd);
1795 switch ((insn >> 6) & 3) {
1797 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1800 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1803 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1806 tcg_temp_free_i32(tmp);
1807 gen_op_iwmmxt_movq_wRn_M0(wrd);
1808 gen_op_iwmmxt_set_mup();
1810 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1811 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1813 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1814 tmp2 = tcg_temp_new_i32();
1815 tcg_gen_mov_i32(tmp2, tmp);
1816 switch ((insn >> 22) & 3) {
1818 for (i = 0; i < 7; i ++) {
1819 tcg_gen_shli_i32(tmp2, tmp2, 4);
1820 tcg_gen_and_i32(tmp, tmp, tmp2);
1824 for (i = 0; i < 3; i ++) {
1825 tcg_gen_shli_i32(tmp2, tmp2, 8);
1826 tcg_gen_and_i32(tmp, tmp, tmp2);
1830 tcg_gen_shli_i32(tmp2, tmp2, 16);
1831 tcg_gen_and_i32(tmp, tmp, tmp2);
1835 tcg_temp_free_i32(tmp2);
1836 tcg_temp_free_i32(tmp);
1838 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1839 wrd = (insn >> 12) & 0xf;
1840 rd0 = (insn >> 16) & 0xf;
1841 gen_op_iwmmxt_movq_M0_wRn(rd0);
1842 switch ((insn >> 22) & 3) {
1844 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1847 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1850 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1855 gen_op_iwmmxt_movq_wRn_M0(wrd);
1856 gen_op_iwmmxt_set_mup();
1858 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1859 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1861 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1862 tmp2 = tcg_temp_new_i32();
1863 tcg_gen_mov_i32(tmp2, tmp);
1864 switch ((insn >> 22) & 3) {
1866 for (i = 0; i < 7; i ++) {
1867 tcg_gen_shli_i32(tmp2, tmp2, 4);
1868 tcg_gen_or_i32(tmp, tmp, tmp2);
1872 for (i = 0; i < 3; i ++) {
1873 tcg_gen_shli_i32(tmp2, tmp2, 8);
1874 tcg_gen_or_i32(tmp, tmp, tmp2);
1878 tcg_gen_shli_i32(tmp2, tmp2, 16);
1879 tcg_gen_or_i32(tmp, tmp, tmp2);
1883 tcg_temp_free_i32(tmp2);
1884 tcg_temp_free_i32(tmp);
1886 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1887 rd = (insn >> 12) & 0xf;
1888 rd0 = (insn >> 16) & 0xf;
1889 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1891 gen_op_iwmmxt_movq_M0_wRn(rd0);
1892 tmp = tcg_temp_new_i32();
1893 switch ((insn >> 22) & 3) {
1895 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1898 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1901 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1904 store_reg(s, rd, tmp);
1906 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1907 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 16) & 0xf;
1910 rd1 = (insn >> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 switch ((insn >> 22) & 3) {
1914 if (insn & (1 << 21))
1915 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1917 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1920 if (insn & (1 << 21))
1921 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1923 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1926 if (insn & (1 << 21))
1927 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1929 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1934 gen_op_iwmmxt_movq_wRn_M0(wrd);
1935 gen_op_iwmmxt_set_mup();
1936 gen_op_iwmmxt_set_cup();
1938 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1939 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 switch ((insn >> 22) & 3) {
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_unpacklsb_M0();
1948 gen_op_iwmmxt_unpacklub_M0();
1951 if (insn & (1 << 21))
1952 gen_op_iwmmxt_unpacklsw_M0();
1954 gen_op_iwmmxt_unpackluw_M0();
1957 if (insn & (1 << 21))
1958 gen_op_iwmmxt_unpacklsl_M0();
1960 gen_op_iwmmxt_unpacklul_M0();
1965 gen_op_iwmmxt_movq_wRn_M0(wrd);
1966 gen_op_iwmmxt_set_mup();
1967 gen_op_iwmmxt_set_cup();
1969 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1970 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1971 wrd = (insn >> 12) & 0xf;
1972 rd0 = (insn >> 16) & 0xf;
1973 gen_op_iwmmxt_movq_M0_wRn(rd0);
1974 switch ((insn >> 22) & 3) {
1976 if (insn & (1 << 21))
1977 gen_op_iwmmxt_unpackhsb_M0();
1979 gen_op_iwmmxt_unpackhub_M0();
1982 if (insn & (1 << 21))
1983 gen_op_iwmmxt_unpackhsw_M0();
1985 gen_op_iwmmxt_unpackhuw_M0();
1988 if (insn & (1 << 21))
1989 gen_op_iwmmxt_unpackhsl_M0();
1991 gen_op_iwmmxt_unpackhul_M0();
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
2000 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2001 case 0x214: case 0x614: case 0xa14: case 0xe14:
2002 if (((insn >> 22) & 3) == 0)
2004 wrd = (insn >> 12) & 0xf;
2005 rd0 = (insn >> 16) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
2007 tmp = tcg_temp_new_i32();
2008 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2009 tcg_temp_free_i32(tmp);
2012 switch ((insn >> 22) & 3) {
2014 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2017 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2020 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2023 tcg_temp_free_i32(tmp);
2024 gen_op_iwmmxt_movq_wRn_M0(wrd);
2025 gen_op_iwmmxt_set_mup();
2026 gen_op_iwmmxt_set_cup();
2028 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2029 case 0x014: case 0x414: case 0x814: case 0xc14:
2030 if (((insn >> 22) & 3) == 0)
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 tmp = tcg_temp_new_i32();
2036 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2037 tcg_temp_free_i32(tmp);
2040 switch ((insn >> 22) & 3) {
2042 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2045 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2048 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2051 tcg_temp_free_i32(tmp);
2052 gen_op_iwmmxt_movq_wRn_M0(wrd);
2053 gen_op_iwmmxt_set_mup();
2054 gen_op_iwmmxt_set_cup();
2056 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2057 case 0x114: case 0x514: case 0x914: case 0xd14:
2058 if (((insn >> 22) & 3) == 0)
2060 wrd = (insn >> 12) & 0xf;
2061 rd0 = (insn >> 16) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
2063 tmp = tcg_temp_new_i32();
2064 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2065 tcg_temp_free_i32(tmp);
2068 switch ((insn >> 22) & 3) {
2070 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2073 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2076 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2079 tcg_temp_free_i32(tmp);
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2084 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2085 case 0x314: case 0x714: case 0xb14: case 0xf14:
2086 if (((insn >> 22) & 3) == 0)
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 tmp = tcg_temp_new_i32();
2092 switch ((insn >> 22) & 3) {
2094 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2095 tcg_temp_free_i32(tmp);
2098 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2101 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2102 tcg_temp_free_i32(tmp);
2105 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2108 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2109 tcg_temp_free_i32(tmp);
2112 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2115 tcg_temp_free_i32(tmp);
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2120 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2121 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2122 wrd = (insn >> 12) & 0xf;
2123 rd0 = (insn >> 16) & 0xf;
2124 rd1 = (insn >> 0) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 switch ((insn >> 22) & 3) {
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2131 gen_op_iwmmxt_minub_M0_wRn(rd1);
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2137 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2140 if (insn & (1 << 21))
2141 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2143 gen_op_iwmmxt_minul_M0_wRn(rd1);
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2151 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2152 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 rd1 = (insn >> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0);
2157 switch ((insn >> 22) & 3) {
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2162 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2165 if (insn & (1 << 21))
2166 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2168 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2171 if (insn & (1 << 21))
2172 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2174 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2182 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2183 case 0x402: case 0x502: case 0x602: case 0x702:
2184 wrd = (insn >> 12) & 0xf;
2185 rd0 = (insn >> 16) & 0xf;
2186 rd1 = (insn >> 0) & 0xf;
2187 gen_op_iwmmxt_movq_M0_wRn(rd0);
2188 tmp = tcg_const_i32((insn >> 20) & 3);
2189 iwmmxt_load_reg(cpu_V1, rd1);
2190 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2195 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2196 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2197 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2198 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2199 wrd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 rd1 = (insn >> 0) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
2203 switch ((insn >> 20) & 0xf) {
2205 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2208 gen_op_iwmmxt_subub_M0_wRn(rd1);
2211 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2214 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2217 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2220 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2223 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2226 gen_op_iwmmxt_subul_M0_wRn(rd1);
2229 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 gen_op_iwmmxt_set_cup();
2238 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2239 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2240 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2241 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
2245 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2246 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2252 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2253 case 0x418: case 0x518: case 0x618: case 0x718:
2254 case 0x818: case 0x918: case 0xa18: case 0xb18:
2255 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
2260 switch ((insn >> 20) & 0xf) {
2262 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2265 gen_op_iwmmxt_addub_M0_wRn(rd1);
2268 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2271 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2274 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2277 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2280 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2283 gen_op_iwmmxt_addul_M0_wRn(rd1);
2286 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2295 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2296 case 0x408: case 0x508: case 0x608: case 0x708:
2297 case 0x808: case 0x908: case 0xa08: case 0xb08:
2298 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2299 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2301 wrd = (insn >> 12) & 0xf;
2302 rd0 = (insn >> 16) & 0xf;
2303 rd1 = (insn >> 0) & 0xf;
2304 gen_op_iwmmxt_movq_M0_wRn(rd0);
2305 switch ((insn >> 22) & 3) {
2307 if (insn & (1 << 21))
2308 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2310 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2313 if (insn & (1 << 21))
2314 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2316 gen_op_iwmmxt_packul_M0_wRn(rd1);
2319 if (insn & (1 << 21))
2320 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2322 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2329 case 0x201: case 0x203: case 0x205: case 0x207:
2330 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2331 case 0x211: case 0x213: case 0x215: case 0x217:
2332 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2333 wrd = (insn >> 5) & 0xf;
2334 rd0 = (insn >> 12) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 if (rd0 == 0xf || rd1 == 0xf)
2338 gen_op_iwmmxt_movq_M0_wRn(wrd);
2339 tmp = load_reg(s, rd0);
2340 tmp2 = load_reg(s, rd1);
2341 switch ((insn >> 16) & 0xf) {
2342 case 0x0: /* TMIA */
2343 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2345 case 0x8: /* TMIAPH */
2346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2348 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2349 if (insn & (1 << 16))
2350 tcg_gen_shri_i32(tmp, tmp, 16);
2351 if (insn & (1 << 17))
2352 tcg_gen_shri_i32(tmp2, tmp2, 16);
2353 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2356 tcg_temp_free_i32(tmp2);
2357 tcg_temp_free_i32(tmp);
2360 tcg_temp_free_i32(tmp2);
2361 tcg_temp_free_i32(tmp);
2362 gen_op_iwmmxt_movq_wRn_M0(wrd);
2363 gen_op_iwmmxt_set_mup();
2372 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2373 (ie. an undefined instruction). */
2374 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2376 int acc, rd0, rd1, rdhi, rdlo;
2379 if ((insn & 0x0ff00f10) == 0x0e200010) {
2380 /* Multiply with Internal Accumulate Format */
2381 rd0 = (insn >> 12) & 0xf;
2383 acc = (insn >> 5) & 7;
2388 tmp = load_reg(s, rd0);
2389 tmp2 = load_reg(s, rd1);
2390 switch ((insn >> 16) & 0xf) {
2392 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2394 case 0x8: /* MIAPH */
2395 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2397 case 0xc: /* MIABB */
2398 case 0xd: /* MIABT */
2399 case 0xe: /* MIATB */
2400 case 0xf: /* MIATT */
2401 if (insn & (1 << 16))
2402 tcg_gen_shri_i32(tmp, tmp, 16);
2403 if (insn & (1 << 17))
2404 tcg_gen_shri_i32(tmp2, tmp2, 16);
2405 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2410 tcg_temp_free_i32(tmp2);
2411 tcg_temp_free_i32(tmp);
2413 gen_op_iwmmxt_movq_wRn_M0(acc);
2417 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2418 /* Internal Accumulator Access Format */
2419 rdhi = (insn >> 16) & 0xf;
2420 rdlo = (insn >> 12) & 0xf;
2426 if (insn & ARM_CP_RW_BIT) { /* MRA */
2427 iwmmxt_load_reg(cpu_V0, acc);
2428 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2429 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2430 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2431 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2433 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2434 iwmmxt_store_reg(cpu_V0, acc);
2442 static int cp15_user_ok(CPUARMState *env, uint32_t insn)
2444 int cpn = (insn >> 16) & 0xf;
2445 int cpm = insn & 0xf;
2446 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2448 if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
2449 /* Performance monitor registers fall into three categories:
2450 * (a) always UNDEF in usermode
2451 * (b) UNDEF only if PMUSERENR.EN is 0
2452 * (c) always read OK and UNDEF on write (PMUSERENR only)
2454 if ((cpm == 12 && (op < 6)) ||
2455 (cpm == 13 && (op < 3))) {
2456 return env->cp15.c9_pmuserenr;
2457 } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
2458 /* PMUSERENR, read only */
2464 if (cpn == 13 && cpm == 0) {
2466 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2472 static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2475 int cpn = (insn >> 16) & 0xf;
2476 int cpm = insn & 0xf;
2477 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2479 if (!arm_feature(env, ARM_FEATURE_V6K))
2482 if (!(cpn == 13 && cpm == 0))
2485 if (insn & ARM_CP_RW_BIT) {
2488 tmp = load_cpu_field(cp15.c13_tls1);
2491 tmp = load_cpu_field(cp15.c13_tls2);
2494 tmp = load_cpu_field(cp15.c13_tls3);
2499 store_reg(s, rd, tmp);
2502 tmp = load_reg(s, rd);
2505 store_cpu_field(tmp, cp15.c13_tls1);
2508 store_cpu_field(tmp, cp15.c13_tls2);
2511 store_cpu_field(tmp, cp15.c13_tls3);
2514 tcg_temp_free_i32(tmp);
2521 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2522 instruction is not defined. */
2523 static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2528 /* M profile cores use memory mapped registers instead of cp15. */
2529 if (arm_feature(env, ARM_FEATURE_M))
2532 if ((insn & (1 << 25)) == 0) {
2533 if (insn & (1 << 20)) {
2537 /* mcrr. Used for block cache operations, so implement as no-op. */
2540 if ((insn & (1 << 4)) == 0) {
2544 /* We special case a number of cp15 instructions which were used
2545 * for things which are real instructions in ARMv7. This allows
2546 * them to work in linux-user mode which doesn't provide functional
2547 * get_cp15/set_cp15 helpers, and is more efficient anyway.
2549 switch ((insn & 0x0fff0fff)) {
2551 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2552 * In v7, this must NOP.
2557 if (!arm_feature(env, ARM_FEATURE_V7)) {
2558 /* Wait for interrupt. */
2559 gen_set_pc_im(s->pc);
2560 s->is_jmp = DISAS_WFI;
2564 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2565 * so this is slightly over-broad.
2567 if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
2568 /* Wait for interrupt. */
2569 gen_set_pc_im(s->pc);
2570 s->is_jmp = DISAS_WFI;
2573 /* Otherwise continue to handle via helper function.
2574 * In particular, on v7 and some v6 cores this is one of
2575 * the VA-PA registers.
2579 /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
2580 if (arm_feature(env, ARM_FEATURE_V6)) {
2581 return IS_USER(s) ? 1 : 0;
2584 case 0x0e070f95: /* 0,c7,c5,4 : ISB */
2585 case 0x0e070f9a: /* 0,c7,c10,4: DSB */
2586 case 0x0e070fba: /* 0,c7,c10,5: DMB */
2587 /* Barriers in both v6 and v7 */
2588 if (arm_feature(env, ARM_FEATURE_V6)) {
2596 if (IS_USER(s) && !cp15_user_ok(env, insn)) {
2600 rd = (insn >> 12) & 0xf;
2602 if (cp15_tls_load_store(env, s, insn, rd))
2605 tmp2 = tcg_const_i32(insn);
2606 if (insn & ARM_CP_RW_BIT) {
2607 tmp = tcg_temp_new_i32();
2608 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2609 /* If the destination register is r15 then sets condition codes. */
2611 store_reg(s, rd, tmp);
2613 tcg_temp_free_i32(tmp);
2615 tmp = load_reg(s, rd);
2616 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2617 tcg_temp_free_i32(tmp);
2618 /* Normally we would always end the TB here, but Linux
2619 * arch/arm/mach-pxa/sleep.S expects two instructions following
2620 * an MMU enable to execute from cache. Imitate this behaviour. */
2621 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2622 (insn & 0x0fff0fff) != 0x0e010f10)
2625 tcg_temp_free_i32(tmp2);
2629 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2630 #define VFP_SREG(insn, bigbit, smallbit) \
2631 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2632 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2633 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2634 reg = (((insn) >> (bigbit)) & 0x0f) \
2635 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2637 if (insn & (1 << (smallbit))) \
2639 reg = ((insn) >> (bigbit)) & 0x0f; \
2642 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2643 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2644 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2645 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2646 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2647 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2649 /* Move between integer and VFP cores. */
2650 static TCGv gen_vfp_mrs(void)
2652 TCGv tmp = tcg_temp_new_i32();
2653 tcg_gen_mov_i32(tmp, cpu_F0s);
2657 static void gen_vfp_msr(TCGv tmp)
2659 tcg_gen_mov_i32(cpu_F0s, tmp);
2660 tcg_temp_free_i32(tmp);
2663 static void gen_neon_dup_u8(TCGv var, int shift)
2665 TCGv tmp = tcg_temp_new_i32();
2667 tcg_gen_shri_i32(var, var, shift);
2668 tcg_gen_ext8u_i32(var, var);
2669 tcg_gen_shli_i32(tmp, var, 8);
2670 tcg_gen_or_i32(var, var, tmp);
2671 tcg_gen_shli_i32(tmp, var, 16);
2672 tcg_gen_or_i32(var, var, tmp);
2673 tcg_temp_free_i32(tmp);
2676 static void gen_neon_dup_low16(TCGv var)
2678 TCGv tmp = tcg_temp_new_i32();
2679 tcg_gen_ext16u_i32(var, var);
2680 tcg_gen_shli_i32(tmp, var, 16);
2681 tcg_gen_or_i32(var, var, tmp);
2682 tcg_temp_free_i32(tmp);
2685 static void gen_neon_dup_high16(TCGv var)
2687 TCGv tmp = tcg_temp_new_i32();
2688 tcg_gen_andi_i32(var, var, 0xffff0000);
2689 tcg_gen_shri_i32(tmp, var, 16);
2690 tcg_gen_or_i32(var, var, tmp);
2691 tcg_temp_free_i32(tmp);
2694 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2696 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2700 tmp = gen_ld8u(addr, IS_USER(s));
2701 gen_neon_dup_u8(tmp, 0);
2704 tmp = gen_ld16u(addr, IS_USER(s));
2705 gen_neon_dup_low16(tmp);
2708 tmp = gen_ld32(addr, IS_USER(s));
2710 default: /* Avoid compiler warnings. */
2716 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2717 (ie. an undefined instruction). */
2718 static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
2720 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2726 if (!arm_feature(env, ARM_FEATURE_VFP))
2729 if (!s->vfp_enabled) {
2730 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2731 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2733 rn = (insn >> 16) & 0xf;
2734 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2735 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2738 dp = ((insn & 0xf00) == 0xb00);
2739 switch ((insn >> 24) & 0xf) {
2741 if (insn & (1 << 4)) {
2742 /* single register transfer */
2743 rd = (insn >> 12) & 0xf;
2748 VFP_DREG_N(rn, insn);
2751 if (insn & 0x00c00060
2752 && !arm_feature(env, ARM_FEATURE_NEON))
2755 pass = (insn >> 21) & 1;
2756 if (insn & (1 << 22)) {
2758 offset = ((insn >> 5) & 3) * 8;
2759 } else if (insn & (1 << 5)) {
2761 offset = (insn & (1 << 6)) ? 16 : 0;
2766 if (insn & ARM_CP_RW_BIT) {
2768 tmp = neon_load_reg(rn, pass);
2772 tcg_gen_shri_i32(tmp, tmp, offset);
2773 if (insn & (1 << 23))
2779 if (insn & (1 << 23)) {
2781 tcg_gen_shri_i32(tmp, tmp, 16);
2787 tcg_gen_sari_i32(tmp, tmp, 16);
2796 store_reg(s, rd, tmp);
2799 tmp = load_reg(s, rd);
2800 if (insn & (1 << 23)) {
2803 gen_neon_dup_u8(tmp, 0);
2804 } else if (size == 1) {
2805 gen_neon_dup_low16(tmp);
2807 for (n = 0; n <= pass * 2; n++) {
2808 tmp2 = tcg_temp_new_i32();
2809 tcg_gen_mov_i32(tmp2, tmp);
2810 neon_store_reg(rn, n, tmp2);
2812 neon_store_reg(rn, n, tmp);
2817 tmp2 = neon_load_reg(rn, pass);
2818 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2819 tcg_temp_free_i32(tmp2);
2822 tmp2 = neon_load_reg(rn, pass);
2823 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2824 tcg_temp_free_i32(tmp2);
2829 neon_store_reg(rn, pass, tmp);
2833 if ((insn & 0x6f) != 0x00)
2835 rn = VFP_SREG_N(insn);
2836 if (insn & ARM_CP_RW_BIT) {
2838 if (insn & (1 << 21)) {
2839 /* system register */
2844 /* VFP2 allows access to FSID from userspace.
2845 VFP3 restricts all id registers to privileged
2848 && arm_feature(env, ARM_FEATURE_VFP3))
2850 tmp = load_cpu_field(vfp.xregs[rn]);
2855 tmp = load_cpu_field(vfp.xregs[rn]);
2857 case ARM_VFP_FPINST:
2858 case ARM_VFP_FPINST2:
2859 /* Not present in VFP3. */
2861 || arm_feature(env, ARM_FEATURE_VFP3))
2863 tmp = load_cpu_field(vfp.xregs[rn]);
2867 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2868 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2870 tmp = tcg_temp_new_i32();
2871 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2877 || !arm_feature(env, ARM_FEATURE_MVFR))
2879 tmp = load_cpu_field(vfp.xregs[rn]);
2885 gen_mov_F0_vreg(0, rn);
2886 tmp = gen_vfp_mrs();
2889 /* Set the 4 flag bits in the CPSR. */
2891 tcg_temp_free_i32(tmp);
2893 store_reg(s, rd, tmp);
2897 tmp = load_reg(s, rd);
2898 if (insn & (1 << 21)) {
2900 /* system register */
2905 /* Writes are ignored. */
2908 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2909 tcg_temp_free_i32(tmp);
2915 /* TODO: VFP subarchitecture support.
2916 * For now, keep the EN bit only */
2917 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2918 store_cpu_field(tmp, vfp.xregs[rn]);
2921 case ARM_VFP_FPINST:
2922 case ARM_VFP_FPINST2:
2923 store_cpu_field(tmp, vfp.xregs[rn]);
2930 gen_mov_vreg_F0(0, rn);
2935 /* data processing */
2936 /* The opcode is in bits 23, 21, 20 and 6. */
2937 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2941 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2943 /* rn is register number */
2944 VFP_DREG_N(rn, insn);
2947 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2948 /* Integer or single precision destination. */
2949 rd = VFP_SREG_D(insn);
2951 VFP_DREG_D(rd, insn);
2954 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2955 /* VCVT from int is always from S reg regardless of dp bit.
2956 * VCVT with immediate frac_bits has same format as SREG_M
2958 rm = VFP_SREG_M(insn);
2960 VFP_DREG_M(rm, insn);
2963 rn = VFP_SREG_N(insn);
2964 if (op == 15 && rn == 15) {
2965 /* Double precision destination. */
2966 VFP_DREG_D(rd, insn);
2968 rd = VFP_SREG_D(insn);
2970 /* NB that we implicitly rely on the encoding for the frac_bits
2971 * in VCVT of fixed to float being the same as that of an SREG_M
2973 rm = VFP_SREG_M(insn);
2976 veclen = s->vec_len;
2977 if (op == 15 && rn > 3)
2980 /* Shut up compiler warnings. */
2991 /* Figure out what type of vector operation this is. */
2992 if ((rd & bank_mask) == 0) {
2997 delta_d = (s->vec_stride >> 1) + 1;
2999 delta_d = s->vec_stride + 1;
3001 if ((rm & bank_mask) == 0) {
3002 /* mixed scalar/vector */
3011 /* Load the initial operands. */
3016 /* Integer source */
3017 gen_mov_F0_vreg(0, rm);
3022 gen_mov_F0_vreg(dp, rd);
3023 gen_mov_F1_vreg(dp, rm);
3027 /* Compare with zero */
3028 gen_mov_F0_vreg(dp, rd);
3039 /* Source and destination the same. */
3040 gen_mov_F0_vreg(dp, rd);
3046 /* VCVTB, VCVTT: only present with the halfprec extension,
3047 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
3049 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
3052 /* Otherwise fall through */
3054 /* One source operand. */
3055 gen_mov_F0_vreg(dp, rm);
3059 /* Two source operands. */
3060 gen_mov_F0_vreg(dp, rn);
3061 gen_mov_F1_vreg(dp, rm);
3065 /* Perform the calculation. */
3067 case 0: /* VMLA: fd + (fn * fm) */
3068 /* Note that order of inputs to the add matters for NaNs */
3070 gen_mov_F0_vreg(dp, rd);
3073 case 1: /* VMLS: fd + -(fn * fm) */
3076 gen_mov_F0_vreg(dp, rd);
3079 case 2: /* VNMLS: -fd + (fn * fm) */
3080 /* Note that it isn't valid to replace (-A + B) with (B - A)
3081 * or similar plausible looking simplifications
3082 * because this will give wrong results for NaNs.
3085 gen_mov_F0_vreg(dp, rd);
3089 case 3: /* VNMLA: -fd + -(fn * fm) */
3092 gen_mov_F0_vreg(dp, rd);
3096 case 4: /* mul: fn * fm */
3099 case 5: /* nmul: -(fn * fm) */
3103 case 6: /* add: fn + fm */
3106 case 7: /* sub: fn - fm */
3109 case 8: /* div: fn / fm */
3112 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3113 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3114 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3115 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3116 /* These are fused multiply-add, and must be done as one
3117 * floating point operation with no rounding between the
3118 * multiplication and addition steps.
3119 * NB that doing the negations here as separate steps is
3120 * correct : an input NaN should come out with its sign bit
3121 * flipped if it is a negated-input.
3123 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3131 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3133 frd = tcg_temp_new_i64();
3134 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3137 gen_helper_vfp_negd(frd, frd);
3139 fpst = get_fpstatus_ptr(0);
3140 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3141 cpu_F1d, frd, fpst);
3142 tcg_temp_free_ptr(fpst);
3143 tcg_temp_free_i64(frd);
3149 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3151 frd = tcg_temp_new_i32();
3152 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3154 gen_helper_vfp_negs(frd, frd);
3156 fpst = get_fpstatus_ptr(0);
3157 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3158 cpu_F1s, frd, fpst);
3159 tcg_temp_free_ptr(fpst);
3160 tcg_temp_free_i32(frd);
3163 case 14: /* fconst */
3164 if (!arm_feature(env, ARM_FEATURE_VFP3))
3167 n = (insn << 12) & 0x80000000;
3168 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3175 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3182 tcg_gen_movi_i32(cpu_F0s, n);
3185 case 15: /* extension space */
3199 case 4: /* vcvtb.f32.f16 */
3200 tmp = gen_vfp_mrs();
3201 tcg_gen_ext16u_i32(tmp, tmp);
3202 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3203 tcg_temp_free_i32(tmp);
3205 case 5: /* vcvtt.f32.f16 */
3206 tmp = gen_vfp_mrs();
3207 tcg_gen_shri_i32(tmp, tmp, 16);
3208 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3209 tcg_temp_free_i32(tmp);
3211 case 6: /* vcvtb.f16.f32 */
3212 tmp = tcg_temp_new_i32();
3213 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3214 gen_mov_F0_vreg(0, rd);
3215 tmp2 = gen_vfp_mrs();
3216 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3217 tcg_gen_or_i32(tmp, tmp, tmp2);
3218 tcg_temp_free_i32(tmp2);
3221 case 7: /* vcvtt.f16.f32 */
3222 tmp = tcg_temp_new_i32();
3223 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3224 tcg_gen_shli_i32(tmp, tmp, 16);
3225 gen_mov_F0_vreg(0, rd);
3226 tmp2 = gen_vfp_mrs();
3227 tcg_gen_ext16u_i32(tmp2, tmp2);
3228 tcg_gen_or_i32(tmp, tmp, tmp2);
3229 tcg_temp_free_i32(tmp2);
3241 case 11: /* cmpez */
3245 case 15: /* single<->double conversion */
3247 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3249 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3251 case 16: /* fuito */
3252 gen_vfp_uito(dp, 0);
3254 case 17: /* fsito */
3255 gen_vfp_sito(dp, 0);
3257 case 20: /* fshto */
3258 if (!arm_feature(env, ARM_FEATURE_VFP3))
3260 gen_vfp_shto(dp, 16 - rm, 0);
3262 case 21: /* fslto */
3263 if (!arm_feature(env, ARM_FEATURE_VFP3))
3265 gen_vfp_slto(dp, 32 - rm, 0);
3267 case 22: /* fuhto */
3268 if (!arm_feature(env, ARM_FEATURE_VFP3))
3270 gen_vfp_uhto(dp, 16 - rm, 0);
3272 case 23: /* fulto */
3273 if (!arm_feature(env, ARM_FEATURE_VFP3))
3275 gen_vfp_ulto(dp, 32 - rm, 0);
3277 case 24: /* ftoui */
3278 gen_vfp_toui(dp, 0);
3280 case 25: /* ftouiz */
3281 gen_vfp_touiz(dp, 0);
3283 case 26: /* ftosi */
3284 gen_vfp_tosi(dp, 0);
3286 case 27: /* ftosiz */
3287 gen_vfp_tosiz(dp, 0);
3289 case 28: /* ftosh */
3290 if (!arm_feature(env, ARM_FEATURE_VFP3))
3292 gen_vfp_tosh(dp, 16 - rm, 0);
3294 case 29: /* ftosl */
3295 if (!arm_feature(env, ARM_FEATURE_VFP3))
3297 gen_vfp_tosl(dp, 32 - rm, 0);
3299 case 30: /* ftouh */
3300 if (!arm_feature(env, ARM_FEATURE_VFP3))
3302 gen_vfp_touh(dp, 16 - rm, 0);
3304 case 31: /* ftoul */
3305 if (!arm_feature(env, ARM_FEATURE_VFP3))
3307 gen_vfp_toul(dp, 32 - rm, 0);
3309 default: /* undefined */
3313 default: /* undefined */
3317 /* Write back the result. */
3318 if (op == 15 && (rn >= 8 && rn <= 11))
3319 ; /* Comparison, do nothing. */
3320 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3321 /* VCVT double to int: always integer result. */
3322 gen_mov_vreg_F0(0, rd);
3323 else if (op == 15 && rn == 15)
3325 gen_mov_vreg_F0(!dp, rd);
3327 gen_mov_vreg_F0(dp, rd);
3329 /* break out of the loop if we have finished */
3333 if (op == 15 && delta_m == 0) {
3334 /* single source one-many */
3336 rd = ((rd + delta_d) & (bank_mask - 1))
3338 gen_mov_vreg_F0(dp, rd);
3342 /* Setup the next operands. */
3344 rd = ((rd + delta_d) & (bank_mask - 1))
3348 /* One source operand. */
3349 rm = ((rm + delta_m) & (bank_mask - 1))
3351 gen_mov_F0_vreg(dp, rm);
3353 /* Two source operands. */
3354 rn = ((rn + delta_d) & (bank_mask - 1))
3356 gen_mov_F0_vreg(dp, rn);
3358 rm = ((rm + delta_m) & (bank_mask - 1))
3360 gen_mov_F1_vreg(dp, rm);
3368 if ((insn & 0x03e00000) == 0x00400000) {
3369 /* two-register transfer */
3370 rn = (insn >> 16) & 0xf;
3371 rd = (insn >> 12) & 0xf;
3373 VFP_DREG_M(rm, insn);
3375 rm = VFP_SREG_M(insn);
3378 if (insn & ARM_CP_RW_BIT) {
3381 gen_mov_F0_vreg(0, rm * 2);
3382 tmp = gen_vfp_mrs();
3383 store_reg(s, rd, tmp);
3384 gen_mov_F0_vreg(0, rm * 2 + 1);
3385 tmp = gen_vfp_mrs();
3386 store_reg(s, rn, tmp);
3388 gen_mov_F0_vreg(0, rm);
3389 tmp = gen_vfp_mrs();
3390 store_reg(s, rd, tmp);
3391 gen_mov_F0_vreg(0, rm + 1);
3392 tmp = gen_vfp_mrs();
3393 store_reg(s, rn, tmp);
3398 tmp = load_reg(s, rd);
3400 gen_mov_vreg_F0(0, rm * 2);
3401 tmp = load_reg(s, rn);
3403 gen_mov_vreg_F0(0, rm * 2 + 1);
3405 tmp = load_reg(s, rd);
3407 gen_mov_vreg_F0(0, rm);
3408 tmp = load_reg(s, rn);
3410 gen_mov_vreg_F0(0, rm + 1);
3415 rn = (insn >> 16) & 0xf;
3417 VFP_DREG_D(rd, insn);
3419 rd = VFP_SREG_D(insn);
3420 if ((insn & 0x01200000) == 0x01000000) {
3421 /* Single load/store */
3422 offset = (insn & 0xff) << 2;
3423 if ((insn & (1 << 23)) == 0)
3425 if (s->thumb && rn == 15) {
3426 /* This is actually UNPREDICTABLE */
3427 addr = tcg_temp_new_i32();
3428 tcg_gen_movi_i32(addr, s->pc & ~2);
3430 addr = load_reg(s, rn);
3432 tcg_gen_addi_i32(addr, addr, offset);
3433 if (insn & (1 << 20)) {
3434 gen_vfp_ld(s, dp, addr);
3435 gen_mov_vreg_F0(dp, rd);
3437 gen_mov_F0_vreg(dp, rd);
3438 gen_vfp_st(s, dp, addr);
3440 tcg_temp_free_i32(addr);
3442 /* load/store multiple */
3443 int w = insn & (1 << 21);
3445 n = (insn >> 1) & 0x7f;
3449 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3450 /* P == U , W == 1 => UNDEF */
3453 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3454 /* UNPREDICTABLE cases for bad immediates: we choose to
3455 * UNDEF to avoid generating huge numbers of TCG ops
3459 if (rn == 15 && w) {
3460 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3464 if (s->thumb && rn == 15) {
3465 /* This is actually UNPREDICTABLE */
3466 addr = tcg_temp_new_i32();
3467 tcg_gen_movi_i32(addr, s->pc & ~2);
3469 addr = load_reg(s, rn);
3471 if (insn & (1 << 24)) /* pre-decrement */
3472 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3478 for (i = 0; i < n; i++) {
3479 if (insn & ARM_CP_RW_BIT) {
3481 gen_vfp_ld(s, dp, addr);
3482 gen_mov_vreg_F0(dp, rd + i);
3485 gen_mov_F0_vreg(dp, rd + i);
3486 gen_vfp_st(s, dp, addr);
3488 tcg_gen_addi_i32(addr, addr, offset);
3492 if (insn & (1 << 24))
3493 offset = -offset * n;
3494 else if (dp && (insn & 1))
3500 tcg_gen_addi_i32(addr, addr, offset);
3501 store_reg(s, rn, addr);
3503 tcg_temp_free_i32(addr);
3509 /* Should never happen. */
3515 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3517 TranslationBlock *tb;
3520 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3522 gen_set_pc_im(dest);
3523 tcg_gen_exit_tb((tcg_target_long)tb + n);
3525 gen_set_pc_im(dest);
3530 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3532 if (unlikely(s->singlestep_enabled)) {
3533 /* An indirect jump so that we still trigger the debug exception. */
3538 gen_goto_tb(s, 0, dest);
3539 s->is_jmp = DISAS_TB_JUMP;
3543 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3546 tcg_gen_sari_i32(t0, t0, 16);
3550 tcg_gen_sari_i32(t1, t1, 16);
3553 tcg_gen_mul_i32(t0, t0, t1);
3556 /* Return the mask of PSR bits set by a MSR instruction. */
3557 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3561 if (flags & (1 << 0))
3563 if (flags & (1 << 1))
3565 if (flags & (1 << 2))
3567 if (flags & (1 << 3))
3570 /* Mask out undefined bits. */
3571 mask &= ~CPSR_RESERVED;
3572 if (!arm_feature(env, ARM_FEATURE_V4T))
3574 if (!arm_feature(env, ARM_FEATURE_V5))
3575 mask &= ~CPSR_Q; /* V5TE in reality*/
3576 if (!arm_feature(env, ARM_FEATURE_V6))
3577 mask &= ~(CPSR_E | CPSR_GE);
3578 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3580 /* Mask out execution state bits. */
3583 /* Mask out privileged bits. */
3589 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3590 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3594 /* ??? This is also undefined in system mode. */
3598 tmp = load_cpu_field(spsr);
3599 tcg_gen_andi_i32(tmp, tmp, ~mask);
3600 tcg_gen_andi_i32(t0, t0, mask);
3601 tcg_gen_or_i32(tmp, tmp, t0);
3602 store_cpu_field(tmp, spsr);
3604 gen_set_cpsr(t0, mask);
3606 tcg_temp_free_i32(t0);
3611 /* Returns nonzero if access to the PSR is not permitted. */
3612 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3615 tmp = tcg_temp_new_i32();
3616 tcg_gen_movi_i32(tmp, val);
3617 return gen_set_psr(s, mask, spsr, tmp);
3620 /* Generate an old-style exception return. Marks pc as dead. */
3621 static void gen_exception_return(DisasContext *s, TCGv pc)
3624 store_reg(s, 15, pc);
3625 tmp = load_cpu_field(spsr);
3626 gen_set_cpsr(tmp, 0xffffffff);
3627 tcg_temp_free_i32(tmp);
3628 s->is_jmp = DISAS_UPDATE;
3631 /* Generate a v6 exception return. Marks both values as dead. */
3632 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3634 gen_set_cpsr(cpsr, 0xffffffff);
3635 tcg_temp_free_i32(cpsr);
3636 store_reg(s, 15, pc);
3637 s->is_jmp = DISAS_UPDATE;
3641 gen_set_condexec (DisasContext *s)
3643 if (s->condexec_mask) {
3644 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3645 TCGv tmp = tcg_temp_new_i32();
3646 tcg_gen_movi_i32(tmp, val);
3647 store_cpu_field(tmp, condexec_bits);
3651 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3653 gen_set_condexec(s);
3654 gen_set_pc_im(s->pc - offset);
3655 gen_exception(excp);
3656 s->is_jmp = DISAS_JUMP;
3659 static void gen_nop_hint(DisasContext *s, int val)
3663 gen_set_pc_im(s->pc);
3664 s->is_jmp = DISAS_WFI;
3668 /* TODO: Implement SEV and WFE. May help SMP performance. */
3674 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3676 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3679 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3680 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3681 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3686 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3689 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3690 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3691 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3696 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3697 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3698 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3699 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3700 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3702 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3703 switch ((size << 1) | u) { \
3705 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3708 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3711 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3714 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3717 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3720 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3722 default: return 1; \
3725 #define GEN_NEON_INTEGER_OP(name) do { \
3726 switch ((size << 1) | u) { \
3728 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3731 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3734 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3737 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3740 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3743 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3745 default: return 1; \
3748 static TCGv neon_load_scratch(int scratch)
3750 TCGv tmp = tcg_temp_new_i32();
3751 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3755 static void neon_store_scratch(int scratch, TCGv var)
3757 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3758 tcg_temp_free_i32(var);
3761 static inline TCGv neon_get_scalar(int size, int reg)
3765 tmp = neon_load_reg(reg & 7, reg >> 4);
3767 gen_neon_dup_high16(tmp);
3769 gen_neon_dup_low16(tmp);
3772 tmp = neon_load_reg(reg & 15, reg >> 4);
3777 static int gen_neon_unzip(int rd, int rm, int size, int q)
3780 if (!q && size == 2) {
3783 tmp = tcg_const_i32(rd);
3784 tmp2 = tcg_const_i32(rm);
3788 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3791 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3794 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3802 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3805 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3811 tcg_temp_free_i32(tmp);
3812 tcg_temp_free_i32(tmp2);
3816 static int gen_neon_zip(int rd, int rm, int size, int q)
3819 if (!q && size == 2) {
3822 tmp = tcg_const_i32(rd);
3823 tmp2 = tcg_const_i32(rm);
3827 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3830 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3833 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3841 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3844 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3850 tcg_temp_free_i32(tmp);
3851 tcg_temp_free_i32(tmp2);
3855 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3859 rd = tcg_temp_new_i32();
3860 tmp = tcg_temp_new_i32();
3862 tcg_gen_shli_i32(rd, t0, 8);
3863 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3864 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3865 tcg_gen_or_i32(rd, rd, tmp);
3867 tcg_gen_shri_i32(t1, t1, 8);
3868 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3869 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3870 tcg_gen_or_i32(t1, t1, tmp);
3871 tcg_gen_mov_i32(t0, rd);
3873 tcg_temp_free_i32(tmp);
3874 tcg_temp_free_i32(rd);
3877 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3881 rd = tcg_temp_new_i32();
3882 tmp = tcg_temp_new_i32();
3884 tcg_gen_shli_i32(rd, t0, 16);
3885 tcg_gen_andi_i32(tmp, t1, 0xffff);
3886 tcg_gen_or_i32(rd, rd, tmp);
3887 tcg_gen_shri_i32(t1, t1, 16);
3888 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3889 tcg_gen_or_i32(t1, t1, tmp);
3890 tcg_gen_mov_i32(t0, rd);
3892 tcg_temp_free_i32(tmp);
3893 tcg_temp_free_i32(rd);
3901 } neon_ls_element_type[11] = {
3915 /* Translate a NEON load/store element instruction. Return nonzero if the
3916 instruction is invalid. */
3917 static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
3936 if (!s->vfp_enabled)
3938 VFP_DREG_D(rd, insn);
3939 rn = (insn >> 16) & 0xf;
3941 load = (insn & (1 << 21)) != 0;
3942 if ((insn & (1 << 23)) == 0) {
3943 /* Load store all elements. */
3944 op = (insn >> 8) & 0xf;
3945 size = (insn >> 6) & 3;
3948 /* Catch UNDEF cases for bad values of align field */
3951 if (((insn >> 5) & 1) == 1) {
3956 if (((insn >> 4) & 3) == 3) {
3963 nregs = neon_ls_element_type[op].nregs;
3964 interleave = neon_ls_element_type[op].interleave;
3965 spacing = neon_ls_element_type[op].spacing;
3966 if (size == 3 && (interleave | spacing) != 1)
3968 addr = tcg_temp_new_i32();
3969 load_reg_var(s, addr, rn);
3970 stride = (1 << size) * interleave;
3971 for (reg = 0; reg < nregs; reg++) {
3972 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3973 load_reg_var(s, addr, rn);
3974 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3975 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3976 load_reg_var(s, addr, rn);
3977 tcg_gen_addi_i32(addr, addr, 1 << size);
3981 tmp64 = gen_ld64(addr, IS_USER(s));
3982 neon_store_reg64(tmp64, rd);
3983 tcg_temp_free_i64(tmp64);
3985 tmp64 = tcg_temp_new_i64();
3986 neon_load_reg64(tmp64, rd);
3987 gen_st64(tmp64, addr, IS_USER(s));
3989 tcg_gen_addi_i32(addr, addr, stride);
3991 for (pass = 0; pass < 2; pass++) {
3994 tmp = gen_ld32(addr, IS_USER(s));
3995 neon_store_reg(rd, pass, tmp);
3997 tmp = neon_load_reg(rd, pass);
3998 gen_st32(tmp, addr, IS_USER(s));
4000 tcg_gen_addi_i32(addr, addr, stride);
4001 } else if (size == 1) {
4003 tmp = gen_ld16u(addr, IS_USER(s));
4004 tcg_gen_addi_i32(addr, addr, stride);
4005 tmp2 = gen_ld16u(addr, IS_USER(s));
4006 tcg_gen_addi_i32(addr, addr, stride);
4007 tcg_gen_shli_i32(tmp2, tmp2, 16);
4008 tcg_gen_or_i32(tmp, tmp, tmp2);
4009 tcg_temp_free_i32(tmp2);
4010 neon_store_reg(rd, pass, tmp);
4012 tmp = neon_load_reg(rd, pass);
4013 tmp2 = tcg_temp_new_i32();
4014 tcg_gen_shri_i32(tmp2, tmp, 16);
4015 gen_st16(tmp, addr, IS_USER(s));
4016 tcg_gen_addi_i32(addr, addr, stride);
4017 gen_st16(tmp2, addr, IS_USER(s));
4018 tcg_gen_addi_i32(addr, addr, stride);
4020 } else /* size == 0 */ {
4023 for (n = 0; n < 4; n++) {
4024 tmp = gen_ld8u(addr, IS_USER(s));
4025 tcg_gen_addi_i32(addr, addr, stride);
4029 tcg_gen_shli_i32(tmp, tmp, n * 8);
4030 tcg_gen_or_i32(tmp2, tmp2, tmp);
4031 tcg_temp_free_i32(tmp);
4034 neon_store_reg(rd, pass, tmp2);
4036 tmp2 = neon_load_reg(rd, pass);
4037 for (n = 0; n < 4; n++) {
4038 tmp = tcg_temp_new_i32();
4040 tcg_gen_mov_i32(tmp, tmp2);
4042 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4044 gen_st8(tmp, addr, IS_USER(s));
4045 tcg_gen_addi_i32(addr, addr, stride);
4047 tcg_temp_free_i32(tmp2);
4054 tcg_temp_free_i32(addr);
4057 size = (insn >> 10) & 3;
4059 /* Load single element to all lanes. */
4060 int a = (insn >> 4) & 1;
4064 size = (insn >> 6) & 3;
4065 nregs = ((insn >> 8) & 3) + 1;
4068 if (nregs != 4 || a == 0) {
4071 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4074 if (nregs == 1 && a == 1 && size == 0) {
4077 if (nregs == 3 && a == 1) {
4080 addr = tcg_temp_new_i32();
4081 load_reg_var(s, addr, rn);
4083 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4084 tmp = gen_load_and_replicate(s, addr, size);
4085 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4086 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4087 if (insn & (1 << 5)) {
4088 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4089 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4091 tcg_temp_free_i32(tmp);
4093 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4094 stride = (insn & (1 << 5)) ? 2 : 1;
4095 for (reg = 0; reg < nregs; reg++) {
4096 tmp = gen_load_and_replicate(s, addr, size);
4097 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4098 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4099 tcg_temp_free_i32(tmp);
4100 tcg_gen_addi_i32(addr, addr, 1 << size);
4104 tcg_temp_free_i32(addr);
4105 stride = (1 << size) * nregs;
4107 /* Single element. */
4108 int idx = (insn >> 4) & 0xf;
4109 pass = (insn >> 7) & 1;
4112 shift = ((insn >> 5) & 3) * 8;
4116 shift = ((insn >> 6) & 1) * 16;
4117 stride = (insn & (1 << 5)) ? 2 : 1;
4121 stride = (insn & (1 << 6)) ? 2 : 1;
4126 nregs = ((insn >> 8) & 3) + 1;
4127 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4130 if (((idx & (1 << size)) != 0) ||
4131 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4136 if ((idx & 1) != 0) {
4141 if (size == 2 && (idx & 2) != 0) {
4146 if ((size == 2) && ((idx & 3) == 3)) {
4153 if ((rd + stride * (nregs - 1)) > 31) {
4154 /* Attempts to write off the end of the register file
4155 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4156 * the neon_load_reg() would write off the end of the array.
4160 addr = tcg_temp_new_i32();
4161 load_reg_var(s, addr, rn);
4162 for (reg = 0; reg < nregs; reg++) {
4166 tmp = gen_ld8u(addr, IS_USER(s));
4169 tmp = gen_ld16u(addr, IS_USER(s));
4172 tmp = gen_ld32(addr, IS_USER(s));
4174 default: /* Avoid compiler warnings. */
4178 tmp2 = neon_load_reg(rd, pass);
4179 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4180 tcg_temp_free_i32(tmp2);
4182 neon_store_reg(rd, pass, tmp);
4183 } else { /* Store */
4184 tmp = neon_load_reg(rd, pass);
4186 tcg_gen_shri_i32(tmp, tmp, shift);
4189 gen_st8(tmp, addr, IS_USER(s));
4192 gen_st16(tmp, addr, IS_USER(s));
4195 gen_st32(tmp, addr, IS_USER(s));
4200 tcg_gen_addi_i32(addr, addr, 1 << size);
4202 tcg_temp_free_i32(addr);
4203 stride = nregs * (1 << size);
4209 base = load_reg(s, rn);
4211 tcg_gen_addi_i32(base, base, stride);
4214 index = load_reg(s, rm);
4215 tcg_gen_add_i32(base, base, index);
4216 tcg_temp_free_i32(index);
4218 store_reg(s, rn, base);
4223 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4224 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4226 tcg_gen_and_i32(t, t, c);
4227 tcg_gen_andc_i32(f, f, c);
4228 tcg_gen_or_i32(dest, t, f);
4231 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4234 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4235 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4236 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4241 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4244 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4245 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4246 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4251 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4254 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4255 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4256 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4261 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4264 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4265 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4266 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4271 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4277 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4278 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4283 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4284 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4291 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4292 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4297 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4298 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4305 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4309 case 0: gen_helper_neon_widen_u8(dest, src); break;
4310 case 1: gen_helper_neon_widen_u16(dest, src); break;
4311 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4316 case 0: gen_helper_neon_widen_s8(dest, src); break;
4317 case 1: gen_helper_neon_widen_s16(dest, src); break;
4318 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4322 tcg_temp_free_i32(src);
4325 static inline void gen_neon_addl(int size)
4328 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4329 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4330 case 2: tcg_gen_add_i64(CPU_V001); break;
4335 static inline void gen_neon_subl(int size)
4338 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4339 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4340 case 2: tcg_gen_sub_i64(CPU_V001); break;
4345 static inline void gen_neon_negl(TCGv_i64 var, int size)
4348 case 0: gen_helper_neon_negl_u16(var, var); break;
4349 case 1: gen_helper_neon_negl_u32(var, var); break;
4350 case 2: gen_helper_neon_negl_u64(var, var); break;
4355 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4358 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4359 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4364 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4368 switch ((size << 1) | u) {
4369 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4370 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4371 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4372 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4374 tmp = gen_muls_i64_i32(a, b);
4375 tcg_gen_mov_i64(dest, tmp);
4376 tcg_temp_free_i64(tmp);
4379 tmp = gen_mulu_i64_i32(a, b);
4380 tcg_gen_mov_i64(dest, tmp);
4381 tcg_temp_free_i64(tmp);
4386 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4387 Don't forget to clean them now. */
4389 tcg_temp_free_i32(a);
4390 tcg_temp_free_i32(b);
4394 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4398 gen_neon_unarrow_sats(size, dest, src);
4400 gen_neon_narrow(size, dest, src);
4404 gen_neon_narrow_satu(size, dest, src);
4406 gen_neon_narrow_sats(size, dest, src);
4411 /* Symbolic constants for op fields for Neon 3-register same-length.
4412 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4415 #define NEON_3R_VHADD 0
4416 #define NEON_3R_VQADD 1
4417 #define NEON_3R_VRHADD 2
4418 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4419 #define NEON_3R_VHSUB 4
4420 #define NEON_3R_VQSUB 5
4421 #define NEON_3R_VCGT 6
4422 #define NEON_3R_VCGE 7
4423 #define NEON_3R_VSHL 8
4424 #define NEON_3R_VQSHL 9
4425 #define NEON_3R_VRSHL 10
4426 #define NEON_3R_VQRSHL 11
4427 #define NEON_3R_VMAX 12
4428 #define NEON_3R_VMIN 13
4429 #define NEON_3R_VABD 14
4430 #define NEON_3R_VABA 15
4431 #define NEON_3R_VADD_VSUB 16
4432 #define NEON_3R_VTST_VCEQ 17
4433 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4434 #define NEON_3R_VMUL 19
4435 #define NEON_3R_VPMAX 20
4436 #define NEON_3R_VPMIN 21
4437 #define NEON_3R_VQDMULH_VQRDMULH 22
4438 #define NEON_3R_VPADD 23
4439 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4440 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4441 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4442 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4443 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4444 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4445 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4447 static const uint8_t neon_3r_sizes[] = {
4448 [NEON_3R_VHADD] = 0x7,
4449 [NEON_3R_VQADD] = 0xf,
4450 [NEON_3R_VRHADD] = 0x7,
4451 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4452 [NEON_3R_VHSUB] = 0x7,
4453 [NEON_3R_VQSUB] = 0xf,
4454 [NEON_3R_VCGT] = 0x7,
4455 [NEON_3R_VCGE] = 0x7,
4456 [NEON_3R_VSHL] = 0xf,
4457 [NEON_3R_VQSHL] = 0xf,
4458 [NEON_3R_VRSHL] = 0xf,
4459 [NEON_3R_VQRSHL] = 0xf,
4460 [NEON_3R_VMAX] = 0x7,
4461 [NEON_3R_VMIN] = 0x7,
4462 [NEON_3R_VABD] = 0x7,
4463 [NEON_3R_VABA] = 0x7,
4464 [NEON_3R_VADD_VSUB] = 0xf,
4465 [NEON_3R_VTST_VCEQ] = 0x7,
4466 [NEON_3R_VML] = 0x7,
4467 [NEON_3R_VMUL] = 0x7,
4468 [NEON_3R_VPMAX] = 0x7,
4469 [NEON_3R_VPMIN] = 0x7,
4470 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4471 [NEON_3R_VPADD] = 0x7,
4472 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4473 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4474 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4475 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4476 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4477 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4478 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4481 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4482 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4485 #define NEON_2RM_VREV64 0
4486 #define NEON_2RM_VREV32 1
4487 #define NEON_2RM_VREV16 2
4488 #define NEON_2RM_VPADDL 4
4489 #define NEON_2RM_VPADDL_U 5
4490 #define NEON_2RM_VCLS 8
4491 #define NEON_2RM_VCLZ 9
4492 #define NEON_2RM_VCNT 10
4493 #define NEON_2RM_VMVN 11
4494 #define NEON_2RM_VPADAL 12
4495 #define NEON_2RM_VPADAL_U 13
4496 #define NEON_2RM_VQABS 14
4497 #define NEON_2RM_VQNEG 15
4498 #define NEON_2RM_VCGT0 16
4499 #define NEON_2RM_VCGE0 17
4500 #define NEON_2RM_VCEQ0 18
4501 #define NEON_2RM_VCLE0 19
4502 #define NEON_2RM_VCLT0 20
4503 #define NEON_2RM_VABS 22
4504 #define NEON_2RM_VNEG 23
4505 #define NEON_2RM_VCGT0_F 24
4506 #define NEON_2RM_VCGE0_F 25
4507 #define NEON_2RM_VCEQ0_F 26
4508 #define NEON_2RM_VCLE0_F 27
4509 #define NEON_2RM_VCLT0_F 28
4510 #define NEON_2RM_VABS_F 30
4511 #define NEON_2RM_VNEG_F 31
4512 #define NEON_2RM_VSWP 32
4513 #define NEON_2RM_VTRN 33
4514 #define NEON_2RM_VUZP 34
4515 #define NEON_2RM_VZIP 35
4516 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4517 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4518 #define NEON_2RM_VSHLL 38
4519 #define NEON_2RM_VCVT_F16_F32 44
4520 #define NEON_2RM_VCVT_F32_F16 46
4521 #define NEON_2RM_VRECPE 56
4522 #define NEON_2RM_VRSQRTE 57
4523 #define NEON_2RM_VRECPE_F 58
4524 #define NEON_2RM_VRSQRTE_F 59
4525 #define NEON_2RM_VCVT_FS 60
4526 #define NEON_2RM_VCVT_FU 61
4527 #define NEON_2RM_VCVT_SF 62
4528 #define NEON_2RM_VCVT_UF 63
4530 static int neon_2rm_is_float_op(int op)
4532 /* Return true if this neon 2reg-misc op is float-to-float */
4533 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4534 op >= NEON_2RM_VRECPE_F);
4537 /* Each entry in this array has bit n set if the insn allows
4538 * size value n (otherwise it will UNDEF). Since unallocated
4539 * op values will have no bits set they always UNDEF.
4541 static const uint8_t neon_2rm_sizes[] = {
4542 [NEON_2RM_VREV64] = 0x7,
4543 [NEON_2RM_VREV32] = 0x3,
4544 [NEON_2RM_VREV16] = 0x1,
4545 [NEON_2RM_VPADDL] = 0x7,
4546 [NEON_2RM_VPADDL_U] = 0x7,
4547 [NEON_2RM_VCLS] = 0x7,
4548 [NEON_2RM_VCLZ] = 0x7,
4549 [NEON_2RM_VCNT] = 0x1,
4550 [NEON_2RM_VMVN] = 0x1,
4551 [NEON_2RM_VPADAL] = 0x7,
4552 [NEON_2RM_VPADAL_U] = 0x7,
4553 [NEON_2RM_VQABS] = 0x7,
4554 [NEON_2RM_VQNEG] = 0x7,
4555 [NEON_2RM_VCGT0] = 0x7,
4556 [NEON_2RM_VCGE0] = 0x7,
4557 [NEON_2RM_VCEQ0] = 0x7,
4558 [NEON_2RM_VCLE0] = 0x7,
4559 [NEON_2RM_VCLT0] = 0x7,
4560 [NEON_2RM_VABS] = 0x7,
4561 [NEON_2RM_VNEG] = 0x7,
4562 [NEON_2RM_VCGT0_F] = 0x4,
4563 [NEON_2RM_VCGE0_F] = 0x4,
4564 [NEON_2RM_VCEQ0_F] = 0x4,
4565 [NEON_2RM_VCLE0_F] = 0x4,
4566 [NEON_2RM_VCLT0_F] = 0x4,
4567 [NEON_2RM_VABS_F] = 0x4,
4568 [NEON_2RM_VNEG_F] = 0x4,
4569 [NEON_2RM_VSWP] = 0x1,
4570 [NEON_2RM_VTRN] = 0x7,
4571 [NEON_2RM_VUZP] = 0x7,
4572 [NEON_2RM_VZIP] = 0x7,
4573 [NEON_2RM_VMOVN] = 0x7,
4574 [NEON_2RM_VQMOVN] = 0x7,
4575 [NEON_2RM_VSHLL] = 0x7,
4576 [NEON_2RM_VCVT_F16_F32] = 0x2,
4577 [NEON_2RM_VCVT_F32_F16] = 0x2,
4578 [NEON_2RM_VRECPE] = 0x4,
4579 [NEON_2RM_VRSQRTE] = 0x4,
4580 [NEON_2RM_VRECPE_F] = 0x4,
4581 [NEON_2RM_VRSQRTE_F] = 0x4,
4582 [NEON_2RM_VCVT_FS] = 0x4,
4583 [NEON_2RM_VCVT_FU] = 0x4,
4584 [NEON_2RM_VCVT_SF] = 0x4,
4585 [NEON_2RM_VCVT_UF] = 0x4,
4588 /* Translate a NEON data processing instruction. Return nonzero if the
4589 instruction is invalid.
4590 We process data in a mixture of 32-bit and 64-bit chunks.
4591 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4593 static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
4605 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4608 if (!s->vfp_enabled)
4610 q = (insn & (1 << 6)) != 0;
4611 u = (insn >> 24) & 1;
4612 VFP_DREG_D(rd, insn);
4613 VFP_DREG_N(rn, insn);
4614 VFP_DREG_M(rm, insn);
4615 size = (insn >> 20) & 3;
4616 if ((insn & (1 << 23)) == 0) {
4617 /* Three register same length. */
4618 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4619 /* Catch invalid op and bad size combinations: UNDEF */
4620 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4623 /* All insns of this form UNDEF for either this condition or the
4624 * superset of cases "Q==1"; we catch the latter later.
4626 if (q && ((rd | rn | rm) & 1)) {
4629 if (size == 3 && op != NEON_3R_LOGIC) {
4630 /* 64-bit element instructions. */
4631 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4632 neon_load_reg64(cpu_V0, rn + pass);
4633 neon_load_reg64(cpu_V1, rm + pass);
4637 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4640 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4646 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4649 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4655 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4657 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4662 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4665 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4671 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4673 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4676 case NEON_3R_VQRSHL:
4678 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4681 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4685 case NEON_3R_VADD_VSUB:
4687 tcg_gen_sub_i64(CPU_V001);
4689 tcg_gen_add_i64(CPU_V001);
4695 neon_store_reg64(cpu_V0, rd + pass);
4704 case NEON_3R_VQRSHL:
4707 /* Shift instruction operands are reversed. */
4722 case NEON_3R_FLOAT_ARITH:
4723 pairwise = (u && size < 2); /* if VPADD (float) */
4725 case NEON_3R_FLOAT_MINMAX:
4726 pairwise = u; /* if VPMIN/VPMAX (float) */
4728 case NEON_3R_FLOAT_CMP:
4730 /* no encoding for U=0 C=1x */
4734 case NEON_3R_FLOAT_ACMP:
4739 case NEON_3R_VRECPS_VRSQRTS:
4745 if (u && (size != 0)) {
4746 /* UNDEF on invalid size for polynomial subcase */
4751 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4759 if (pairwise && q) {
4760 /* All the pairwise insns UNDEF if Q is set */
4764 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4769 tmp = neon_load_reg(rn, 0);
4770 tmp2 = neon_load_reg(rn, 1);
4772 tmp = neon_load_reg(rm, 0);
4773 tmp2 = neon_load_reg(rm, 1);
4777 tmp = neon_load_reg(rn, pass);
4778 tmp2 = neon_load_reg(rm, pass);
4782 GEN_NEON_INTEGER_OP(hadd);
4785 GEN_NEON_INTEGER_OP_ENV(qadd);
4787 case NEON_3R_VRHADD:
4788 GEN_NEON_INTEGER_OP(rhadd);
4790 case NEON_3R_LOGIC: /* Logic ops. */
4791 switch ((u << 2) | size) {
4793 tcg_gen_and_i32(tmp, tmp, tmp2);
4796 tcg_gen_andc_i32(tmp, tmp, tmp2);
4799 tcg_gen_or_i32(tmp, tmp, tmp2);
4802 tcg_gen_orc_i32(tmp, tmp, tmp2);
4805 tcg_gen_xor_i32(tmp, tmp, tmp2);
4808 tmp3 = neon_load_reg(rd, pass);
4809 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4810 tcg_temp_free_i32(tmp3);
4813 tmp3 = neon_load_reg(rd, pass);
4814 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4815 tcg_temp_free_i32(tmp3);
4818 tmp3 = neon_load_reg(rd, pass);
4819 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4820 tcg_temp_free_i32(tmp3);
4825 GEN_NEON_INTEGER_OP(hsub);
4828 GEN_NEON_INTEGER_OP_ENV(qsub);
4831 GEN_NEON_INTEGER_OP(cgt);
4834 GEN_NEON_INTEGER_OP(cge);
4837 GEN_NEON_INTEGER_OP(shl);
4840 GEN_NEON_INTEGER_OP_ENV(qshl);
4843 GEN_NEON_INTEGER_OP(rshl);
4845 case NEON_3R_VQRSHL:
4846 GEN_NEON_INTEGER_OP_ENV(qrshl);
4849 GEN_NEON_INTEGER_OP(max);
4852 GEN_NEON_INTEGER_OP(min);
4855 GEN_NEON_INTEGER_OP(abd);
4858 GEN_NEON_INTEGER_OP(abd);
4859 tcg_temp_free_i32(tmp2);
4860 tmp2 = neon_load_reg(rd, pass);
4861 gen_neon_add(size, tmp, tmp2);
4863 case NEON_3R_VADD_VSUB:
4864 if (!u) { /* VADD */
4865 gen_neon_add(size, tmp, tmp2);
4868 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4869 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4870 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4875 case NEON_3R_VTST_VCEQ:
4876 if (!u) { /* VTST */
4878 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4879 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4880 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4885 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4886 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4887 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4892 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
4894 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4895 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4896 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4899 tcg_temp_free_i32(tmp2);
4900 tmp2 = neon_load_reg(rd, pass);
4902 gen_neon_rsb(size, tmp, tmp2);
4904 gen_neon_add(size, tmp, tmp2);
4908 if (u) { /* polynomial */
4909 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4910 } else { /* Integer */
4912 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4913 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4914 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4920 GEN_NEON_INTEGER_OP(pmax);
4923 GEN_NEON_INTEGER_OP(pmin);
4925 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4926 if (!u) { /* VQDMULH */
4929 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4932 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4936 } else { /* VQRDMULH */
4939 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4942 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4950 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4951 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4952 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4956 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4958 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4959 switch ((u << 2) | size) {
4962 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4965 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4968 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4973 tcg_temp_free_ptr(fpstatus);
4976 case NEON_3R_FLOAT_MULTIPLY:
4978 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4979 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4981 tcg_temp_free_i32(tmp2);
4982 tmp2 = neon_load_reg(rd, pass);
4984 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4986 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4989 tcg_temp_free_ptr(fpstatus);
4992 case NEON_3R_FLOAT_CMP:
4994 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4996 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4999 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5001 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5004 tcg_temp_free_ptr(fpstatus);
5007 case NEON_3R_FLOAT_ACMP:
5009 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5011 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5013 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5015 tcg_temp_free_ptr(fpstatus);
5018 case NEON_3R_FLOAT_MINMAX:
5020 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5022 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
5024 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
5026 tcg_temp_free_ptr(fpstatus);
5029 case NEON_3R_VRECPS_VRSQRTS:
5031 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5033 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5037 /* VFMA, VFMS: fused multiply-add */
5038 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5039 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5042 gen_helper_vfp_negs(tmp, tmp);
5044 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5045 tcg_temp_free_i32(tmp3);
5046 tcg_temp_free_ptr(fpstatus);
5052 tcg_temp_free_i32(tmp2);
5054 /* Save the result. For elementwise operations we can put it
5055 straight into the destination register. For pairwise operations
5056 we have to be careful to avoid clobbering the source operands. */
5057 if (pairwise && rd == rm) {
5058 neon_store_scratch(pass, tmp);
5060 neon_store_reg(rd, pass, tmp);
5064 if (pairwise && rd == rm) {
5065 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5066 tmp = neon_load_scratch(pass);
5067 neon_store_reg(rd, pass, tmp);
5070 /* End of 3 register same size operations. */
5071 } else if (insn & (1 << 4)) {
5072 if ((insn & 0x00380080) != 0) {
5073 /* Two registers and shift. */
5074 op = (insn >> 8) & 0xf;
5075 if (insn & (1 << 7)) {
5083 while ((insn & (1 << (size + 19))) == 0)
5086 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5087 /* To avoid excessive dumplication of ops we implement shift
5088 by immediate using the variable shift operations. */
5090 /* Shift by immediate:
5091 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5092 if (q && ((rd | rm) & 1)) {
5095 if (!u && (op == 4 || op == 6)) {
5098 /* Right shifts are encoded as N - shift, where N is the
5099 element size in bits. */
5101 shift = shift - (1 << (size + 3));
5109 imm = (uint8_t) shift;
5114 imm = (uint16_t) shift;
5125 for (pass = 0; pass < count; pass++) {
5127 neon_load_reg64(cpu_V0, rm + pass);
5128 tcg_gen_movi_i64(cpu_V1, imm);
5133 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5135 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5140 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5142 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5145 case 5: /* VSHL, VSLI */
5146 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5148 case 6: /* VQSHLU */
5149 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5154 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5157 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5162 if (op == 1 || op == 3) {
5164 neon_load_reg64(cpu_V1, rd + pass);
5165 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5166 } else if (op == 4 || (op == 5 && u)) {
5168 neon_load_reg64(cpu_V1, rd + pass);
5170 if (shift < -63 || shift > 63) {
5174 mask = 0xffffffffffffffffull >> -shift;
5176 mask = 0xffffffffffffffffull << shift;
5179 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5180 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5182 neon_store_reg64(cpu_V0, rd + pass);
5183 } else { /* size < 3 */
5184 /* Operands in T0 and T1. */
5185 tmp = neon_load_reg(rm, pass);
5186 tmp2 = tcg_temp_new_i32();
5187 tcg_gen_movi_i32(tmp2, imm);
5191 GEN_NEON_INTEGER_OP(shl);
5195 GEN_NEON_INTEGER_OP(rshl);
5198 case 5: /* VSHL, VSLI */
5200 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5201 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5202 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5206 case 6: /* VQSHLU */
5209 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5213 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5217 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5225 GEN_NEON_INTEGER_OP_ENV(qshl);
5228 tcg_temp_free_i32(tmp2);
5230 if (op == 1 || op == 3) {
5232 tmp2 = neon_load_reg(rd, pass);
5233 gen_neon_add(size, tmp, tmp2);
5234 tcg_temp_free_i32(tmp2);
5235 } else if (op == 4 || (op == 5 && u)) {
5240 mask = 0xff >> -shift;
5242 mask = (uint8_t)(0xff << shift);
5248 mask = 0xffff >> -shift;
5250 mask = (uint16_t)(0xffff << shift);
5254 if (shift < -31 || shift > 31) {
5258 mask = 0xffffffffu >> -shift;
5260 mask = 0xffffffffu << shift;
5266 tmp2 = neon_load_reg(rd, pass);
5267 tcg_gen_andi_i32(tmp, tmp, mask);
5268 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5269 tcg_gen_or_i32(tmp, tmp, tmp2);
5270 tcg_temp_free_i32(tmp2);
5272 neon_store_reg(rd, pass, tmp);
5275 } else if (op < 10) {
5276 /* Shift by immediate and narrow:
5277 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5278 int input_unsigned = (op == 8) ? !u : u;
5282 shift = shift - (1 << (size + 3));
5285 tmp64 = tcg_const_i64(shift);
5286 neon_load_reg64(cpu_V0, rm);
5287 neon_load_reg64(cpu_V1, rm + 1);
5288 for (pass = 0; pass < 2; pass++) {
5296 if (input_unsigned) {
5297 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5299 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5302 if (input_unsigned) {
5303 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5305 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5308 tmp = tcg_temp_new_i32();
5309 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5310 neon_store_reg(rd, pass, tmp);
5312 tcg_temp_free_i64(tmp64);
5315 imm = (uint16_t)shift;
5319 imm = (uint32_t)shift;
5321 tmp2 = tcg_const_i32(imm);
5322 tmp4 = neon_load_reg(rm + 1, 0);
5323 tmp5 = neon_load_reg(rm + 1, 1);
5324 for (pass = 0; pass < 2; pass++) {
5326 tmp = neon_load_reg(rm, 0);
5330 gen_neon_shift_narrow(size, tmp, tmp2, q,
5333 tmp3 = neon_load_reg(rm, 1);
5337 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5339 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5340 tcg_temp_free_i32(tmp);
5341 tcg_temp_free_i32(tmp3);
5342 tmp = tcg_temp_new_i32();
5343 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5344 neon_store_reg(rd, pass, tmp);
5346 tcg_temp_free_i32(tmp2);
5348 } else if (op == 10) {
5350 if (q || (rd & 1)) {
5353 tmp = neon_load_reg(rm, 0);
5354 tmp2 = neon_load_reg(rm, 1);
5355 for (pass = 0; pass < 2; pass++) {
5359 gen_neon_widen(cpu_V0, tmp, size, u);
5362 /* The shift is less than the width of the source
5363 type, so we can just shift the whole register. */
5364 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5365 /* Widen the result of shift: we need to clear
5366 * the potential overflow bits resulting from
5367 * left bits of the narrow input appearing as
5368 * right bits of left the neighbour narrow
5370 if (size < 2 || !u) {
5373 imm = (0xffu >> (8 - shift));
5375 } else if (size == 1) {
5376 imm = 0xffff >> (16 - shift);
5379 imm = 0xffffffff >> (32 - shift);
5382 imm64 = imm | (((uint64_t)imm) << 32);
5386 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5389 neon_store_reg64(cpu_V0, rd + pass);
5391 } else if (op >= 14) {
5392 /* VCVT fixed-point. */
5393 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5396 /* We have already masked out the must-be-1 top bit of imm6,
5397 * hence this 32-shift where the ARM ARM has 64-imm6.
5400 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5401 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5404 gen_vfp_ulto(0, shift, 1);
5406 gen_vfp_slto(0, shift, 1);
5409 gen_vfp_toul(0, shift, 1);
5411 gen_vfp_tosl(0, shift, 1);
5413 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5418 } else { /* (insn & 0x00380080) == 0 */
5420 if (q && (rd & 1)) {
5424 op = (insn >> 8) & 0xf;
5425 /* One register and immediate. */
5426 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5427 invert = (insn & (1 << 5)) != 0;
5428 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5429 * We choose to not special-case this and will behave as if a
5430 * valid constant encoding of 0 had been given.
5449 imm = (imm << 8) | (imm << 24);
5452 imm = (imm << 8) | 0xff;
5455 imm = (imm << 16) | 0xffff;
5458 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5466 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5467 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5473 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5474 if (op & 1 && op < 12) {
5475 tmp = neon_load_reg(rd, pass);
5477 /* The immediate value has already been inverted, so
5479 tcg_gen_andi_i32(tmp, tmp, imm);
5481 tcg_gen_ori_i32(tmp, tmp, imm);
5485 tmp = tcg_temp_new_i32();
5486 if (op == 14 && invert) {
5490 for (n = 0; n < 4; n++) {
5491 if (imm & (1 << (n + (pass & 1) * 4)))
5492 val |= 0xff << (n * 8);
5494 tcg_gen_movi_i32(tmp, val);
5496 tcg_gen_movi_i32(tmp, imm);
5499 neon_store_reg(rd, pass, tmp);
5502 } else { /* (insn & 0x00800010 == 0x00800000) */
5504 op = (insn >> 8) & 0xf;
5505 if ((insn & (1 << 6)) == 0) {
5506 /* Three registers of different lengths. */
5510 /* undefreq: bit 0 : UNDEF if size != 0
5511 * bit 1 : UNDEF if size == 0
5512 * bit 2 : UNDEF if U == 1
5513 * Note that [1:0] set implies 'always UNDEF'
5516 /* prewiden, src1_wide, src2_wide, undefreq */
5517 static const int neon_3reg_wide[16][4] = {
5518 {1, 0, 0, 0}, /* VADDL */
5519 {1, 1, 0, 0}, /* VADDW */
5520 {1, 0, 0, 0}, /* VSUBL */
5521 {1, 1, 0, 0}, /* VSUBW */
5522 {0, 1, 1, 0}, /* VADDHN */
5523 {0, 0, 0, 0}, /* VABAL */
5524 {0, 1, 1, 0}, /* VSUBHN */
5525 {0, 0, 0, 0}, /* VABDL */
5526 {0, 0, 0, 0}, /* VMLAL */
5527 {0, 0, 0, 6}, /* VQDMLAL */
5528 {0, 0, 0, 0}, /* VMLSL */
5529 {0, 0, 0, 6}, /* VQDMLSL */
5530 {0, 0, 0, 0}, /* Integer VMULL */
5531 {0, 0, 0, 2}, /* VQDMULL */
5532 {0, 0, 0, 5}, /* Polynomial VMULL */
5533 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5536 prewiden = neon_3reg_wide[op][0];
5537 src1_wide = neon_3reg_wide[op][1];
5538 src2_wide = neon_3reg_wide[op][2];
5539 undefreq = neon_3reg_wide[op][3];
5541 if (((undefreq & 1) && (size != 0)) ||
5542 ((undefreq & 2) && (size == 0)) ||
5543 ((undefreq & 4) && u)) {
5546 if ((src1_wide && (rn & 1)) ||
5547 (src2_wide && (rm & 1)) ||
5548 (!src2_wide && (rd & 1))) {
5552 /* Avoid overlapping operands. Wide source operands are
5553 always aligned so will never overlap with wide
5554 destinations in problematic ways. */
5555 if (rd == rm && !src2_wide) {
5556 tmp = neon_load_reg(rm, 1);
5557 neon_store_scratch(2, tmp);
5558 } else if (rd == rn && !src1_wide) {
5559 tmp = neon_load_reg(rn, 1);
5560 neon_store_scratch(2, tmp);
5563 for (pass = 0; pass < 2; pass++) {
5565 neon_load_reg64(cpu_V0, rn + pass);
5568 if (pass == 1 && rd == rn) {
5569 tmp = neon_load_scratch(2);
5571 tmp = neon_load_reg(rn, pass);
5574 gen_neon_widen(cpu_V0, tmp, size, u);
5578 neon_load_reg64(cpu_V1, rm + pass);
5581 if (pass == 1 && rd == rm) {
5582 tmp2 = neon_load_scratch(2);
5584 tmp2 = neon_load_reg(rm, pass);
5587 gen_neon_widen(cpu_V1, tmp2, size, u);
5591 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5592 gen_neon_addl(size);
5594 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5595 gen_neon_subl(size);
5597 case 5: case 7: /* VABAL, VABDL */
5598 switch ((size << 1) | u) {
5600 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5603 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5606 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5609 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5612 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5615 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5619 tcg_temp_free_i32(tmp2);
5620 tcg_temp_free_i32(tmp);
5622 case 8: case 9: case 10: case 11: case 12: case 13:
5623 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5624 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5626 case 14: /* Polynomial VMULL */
5627 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5628 tcg_temp_free_i32(tmp2);
5629 tcg_temp_free_i32(tmp);
5631 default: /* 15 is RESERVED: caught earlier */
5636 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5637 neon_store_reg64(cpu_V0, rd + pass);
5638 } else if (op == 5 || (op >= 8 && op <= 11)) {
5640 neon_load_reg64(cpu_V1, rd + pass);
5642 case 10: /* VMLSL */
5643 gen_neon_negl(cpu_V0, size);
5645 case 5: case 8: /* VABAL, VMLAL */
5646 gen_neon_addl(size);
5648 case 9: case 11: /* VQDMLAL, VQDMLSL */
5649 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5651 gen_neon_negl(cpu_V0, size);
5653 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5658 neon_store_reg64(cpu_V0, rd + pass);
5659 } else if (op == 4 || op == 6) {
5660 /* Narrowing operation. */
5661 tmp = tcg_temp_new_i32();
5665 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5668 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5671 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5672 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5679 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5682 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5685 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5686 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5687 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5695 neon_store_reg(rd, 0, tmp3);
5696 neon_store_reg(rd, 1, tmp);
5699 /* Write back the result. */
5700 neon_store_reg64(cpu_V0, rd + pass);
5704 /* Two registers and a scalar. NB that for ops of this form
5705 * the ARM ARM labels bit 24 as Q, but it is in our variable
5712 case 1: /* Float VMLA scalar */
5713 case 5: /* Floating point VMLS scalar */
5714 case 9: /* Floating point VMUL scalar */
5719 case 0: /* Integer VMLA scalar */
5720 case 4: /* Integer VMLS scalar */
5721 case 8: /* Integer VMUL scalar */
5722 case 12: /* VQDMULH scalar */
5723 case 13: /* VQRDMULH scalar */
5724 if (u && ((rd | rn) & 1)) {
5727 tmp = neon_get_scalar(size, rm);
5728 neon_store_scratch(0, tmp);
5729 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5730 tmp = neon_load_scratch(0);
5731 tmp2 = neon_load_reg(rn, pass);
5734 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5736 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5738 } else if (op == 13) {
5740 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5742 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5744 } else if (op & 1) {
5745 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5746 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5747 tcg_temp_free_ptr(fpstatus);
5750 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5751 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5752 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5756 tcg_temp_free_i32(tmp2);
5759 tmp2 = neon_load_reg(rd, pass);
5762 gen_neon_add(size, tmp, tmp2);
5766 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5767 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5768 tcg_temp_free_ptr(fpstatus);
5772 gen_neon_rsb(size, tmp, tmp2);
5776 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5777 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5778 tcg_temp_free_ptr(fpstatus);
5784 tcg_temp_free_i32(tmp2);
5786 neon_store_reg(rd, pass, tmp);
5789 case 3: /* VQDMLAL scalar */
5790 case 7: /* VQDMLSL scalar */
5791 case 11: /* VQDMULL scalar */
5796 case 2: /* VMLAL sclar */
5797 case 6: /* VMLSL scalar */
5798 case 10: /* VMULL scalar */
5802 tmp2 = neon_get_scalar(size, rm);
5803 /* We need a copy of tmp2 because gen_neon_mull
5804 * deletes it during pass 0. */
5805 tmp4 = tcg_temp_new_i32();
5806 tcg_gen_mov_i32(tmp4, tmp2);
5807 tmp3 = neon_load_reg(rn, 1);
5809 for (pass = 0; pass < 2; pass++) {
5811 tmp = neon_load_reg(rn, 0);
5816 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5818 neon_load_reg64(cpu_V1, rd + pass);
5822 gen_neon_negl(cpu_V0, size);
5825 gen_neon_addl(size);
5828 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5830 gen_neon_negl(cpu_V0, size);
5832 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5838 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5843 neon_store_reg64(cpu_V0, rd + pass);
5848 default: /* 14 and 15 are RESERVED */
5852 } else { /* size == 3 */
5855 imm = (insn >> 8) & 0xf;
5860 if (q && ((rd | rn | rm) & 1)) {
5865 neon_load_reg64(cpu_V0, rn);
5867 neon_load_reg64(cpu_V1, rn + 1);
5869 } else if (imm == 8) {
5870 neon_load_reg64(cpu_V0, rn + 1);
5872 neon_load_reg64(cpu_V1, rm);
5875 tmp64 = tcg_temp_new_i64();
5877 neon_load_reg64(cpu_V0, rn);
5878 neon_load_reg64(tmp64, rn + 1);
5880 neon_load_reg64(cpu_V0, rn + 1);
5881 neon_load_reg64(tmp64, rm);
5883 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5884 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5885 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5887 neon_load_reg64(cpu_V1, rm);
5889 neon_load_reg64(cpu_V1, rm + 1);
5892 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5893 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5894 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5895 tcg_temp_free_i64(tmp64);
5898 neon_load_reg64(cpu_V0, rn);
5899 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5900 neon_load_reg64(cpu_V1, rm);
5901 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5902 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5904 neon_store_reg64(cpu_V0, rd);
5906 neon_store_reg64(cpu_V1, rd + 1);
5908 } else if ((insn & (1 << 11)) == 0) {
5909 /* Two register misc. */
5910 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5911 size = (insn >> 18) & 3;
5912 /* UNDEF for unknown op values and bad op-size combinations */
5913 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5916 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5917 q && ((rm | rd) & 1)) {
5921 case NEON_2RM_VREV64:
5922 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5923 tmp = neon_load_reg(rm, pass * 2);
5924 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5926 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5927 case 1: gen_swap_half(tmp); break;
5928 case 2: /* no-op */ break;
5931 neon_store_reg(rd, pass * 2 + 1, tmp);
5933 neon_store_reg(rd, pass * 2, tmp2);
5936 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5937 case 1: gen_swap_half(tmp2); break;
5940 neon_store_reg(rd, pass * 2, tmp2);
5944 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5945 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5946 for (pass = 0; pass < q + 1; pass++) {
5947 tmp = neon_load_reg(rm, pass * 2);
5948 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5949 tmp = neon_load_reg(rm, pass * 2 + 1);
5950 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5952 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5953 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5954 case 2: tcg_gen_add_i64(CPU_V001); break;
5957 if (op >= NEON_2RM_VPADAL) {
5959 neon_load_reg64(cpu_V1, rd + pass);
5960 gen_neon_addl(size);
5962 neon_store_reg64(cpu_V0, rd + pass);
5968 for (n = 0; n < (q ? 4 : 2); n += 2) {
5969 tmp = neon_load_reg(rm, n);
5970 tmp2 = neon_load_reg(rd, n + 1);
5971 neon_store_reg(rm, n, tmp2);
5972 neon_store_reg(rd, n + 1, tmp);
5979 if (gen_neon_unzip(rd, rm, size, q)) {
5984 if (gen_neon_zip(rd, rm, size, q)) {
5988 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5989 /* also VQMOVUN; op field and mnemonics don't line up */
5994 for (pass = 0; pass < 2; pass++) {
5995 neon_load_reg64(cpu_V0, rm + pass);
5996 tmp = tcg_temp_new_i32();
5997 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6002 neon_store_reg(rd, 0, tmp2);
6003 neon_store_reg(rd, 1, tmp);
6007 case NEON_2RM_VSHLL:
6008 if (q || (rd & 1)) {
6011 tmp = neon_load_reg(rm, 0);
6012 tmp2 = neon_load_reg(rm, 1);
6013 for (pass = 0; pass < 2; pass++) {
6016 gen_neon_widen(cpu_V0, tmp, size, 1);
6017 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6018 neon_store_reg64(cpu_V0, rd + pass);
6021 case NEON_2RM_VCVT_F16_F32:
6022 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6026 tmp = tcg_temp_new_i32();
6027 tmp2 = tcg_temp_new_i32();
6028 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6029 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6030 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6031 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6032 tcg_gen_shli_i32(tmp2, tmp2, 16);
6033 tcg_gen_or_i32(tmp2, tmp2, tmp);
6034 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6035 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6036 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6037 neon_store_reg(rd, 0, tmp2);
6038 tmp2 = tcg_temp_new_i32();
6039 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6040 tcg_gen_shli_i32(tmp2, tmp2, 16);
6041 tcg_gen_or_i32(tmp2, tmp2, tmp);
6042 neon_store_reg(rd, 1, tmp2);
6043 tcg_temp_free_i32(tmp);
6045 case NEON_2RM_VCVT_F32_F16:
6046 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6050 tmp3 = tcg_temp_new_i32();
6051 tmp = neon_load_reg(rm, 0);
6052 tmp2 = neon_load_reg(rm, 1);
6053 tcg_gen_ext16u_i32(tmp3, tmp);
6054 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6055 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6056 tcg_gen_shri_i32(tmp3, tmp, 16);
6057 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6058 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6059 tcg_temp_free_i32(tmp);
6060 tcg_gen_ext16u_i32(tmp3, tmp2);
6061 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6062 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6063 tcg_gen_shri_i32(tmp3, tmp2, 16);
6064 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6065 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6066 tcg_temp_free_i32(tmp2);
6067 tcg_temp_free_i32(tmp3);
6071 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6072 if (neon_2rm_is_float_op(op)) {
6073 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6074 neon_reg_offset(rm, pass));
6077 tmp = neon_load_reg(rm, pass);
6080 case NEON_2RM_VREV32:
6082 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6083 case 1: gen_swap_half(tmp); break;
6087 case NEON_2RM_VREV16:
6092 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6093 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6094 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6100 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6101 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6102 case 2: gen_helper_clz(tmp, tmp); break;
6107 gen_helper_neon_cnt_u8(tmp, tmp);
6110 tcg_gen_not_i32(tmp, tmp);
6112 case NEON_2RM_VQABS:
6115 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6118 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6121 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6126 case NEON_2RM_VQNEG:
6129 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6132 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6135 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6140 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6141 tmp2 = tcg_const_i32(0);
6143 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6144 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6145 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6148 tcg_temp_free(tmp2);
6149 if (op == NEON_2RM_VCLE0) {
6150 tcg_gen_not_i32(tmp, tmp);
6153 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6154 tmp2 = tcg_const_i32(0);
6156 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6157 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6158 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6161 tcg_temp_free(tmp2);
6162 if (op == NEON_2RM_VCLT0) {
6163 tcg_gen_not_i32(tmp, tmp);
6166 case NEON_2RM_VCEQ0:
6167 tmp2 = tcg_const_i32(0);
6169 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6170 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6171 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6174 tcg_temp_free(tmp2);
6178 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6179 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6180 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6185 tmp2 = tcg_const_i32(0);
6186 gen_neon_rsb(size, tmp, tmp2);
6187 tcg_temp_free(tmp2);
6189 case NEON_2RM_VCGT0_F:
6191 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6192 tmp2 = tcg_const_i32(0);
6193 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6194 tcg_temp_free(tmp2);
6195 tcg_temp_free_ptr(fpstatus);
6198 case NEON_2RM_VCGE0_F:
6200 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6201 tmp2 = tcg_const_i32(0);
6202 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6203 tcg_temp_free(tmp2);
6204 tcg_temp_free_ptr(fpstatus);
6207 case NEON_2RM_VCEQ0_F:
6209 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6210 tmp2 = tcg_const_i32(0);
6211 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6212 tcg_temp_free(tmp2);
6213 tcg_temp_free_ptr(fpstatus);
6216 case NEON_2RM_VCLE0_F:
6218 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6219 tmp2 = tcg_const_i32(0);
6220 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6221 tcg_temp_free(tmp2);
6222 tcg_temp_free_ptr(fpstatus);
6225 case NEON_2RM_VCLT0_F:
6227 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6228 tmp2 = tcg_const_i32(0);
6229 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6230 tcg_temp_free(tmp2);
6231 tcg_temp_free_ptr(fpstatus);
6234 case NEON_2RM_VABS_F:
6237 case NEON_2RM_VNEG_F:
6241 tmp2 = neon_load_reg(rd, pass);
6242 neon_store_reg(rm, pass, tmp2);
6245 tmp2 = neon_load_reg(rd, pass);
6247 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6248 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6251 neon_store_reg(rm, pass, tmp2);
6253 case NEON_2RM_VRECPE:
6254 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6256 case NEON_2RM_VRSQRTE:
6257 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6259 case NEON_2RM_VRECPE_F:
6260 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6262 case NEON_2RM_VRSQRTE_F:
6263 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6265 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6268 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6271 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6272 gen_vfp_tosiz(0, 1);
6274 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6275 gen_vfp_touiz(0, 1);
6278 /* Reserved op values were caught by the
6279 * neon_2rm_sizes[] check earlier.
6283 if (neon_2rm_is_float_op(op)) {
6284 tcg_gen_st_f32(cpu_F0s, cpu_env,
6285 neon_reg_offset(rd, pass));
6287 neon_store_reg(rd, pass, tmp);
6292 } else if ((insn & (1 << 10)) == 0) {
6294 int n = ((insn >> 8) & 3) + 1;
6295 if ((rn + n) > 32) {
6296 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6297 * helper function running off the end of the register file.
6302 if (insn & (1 << 6)) {
6303 tmp = neon_load_reg(rd, 0);
6305 tmp = tcg_temp_new_i32();
6306 tcg_gen_movi_i32(tmp, 0);
6308 tmp2 = neon_load_reg(rm, 0);
6309 tmp4 = tcg_const_i32(rn);
6310 tmp5 = tcg_const_i32(n);
6311 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
6312 tcg_temp_free_i32(tmp);
6313 if (insn & (1 << 6)) {
6314 tmp = neon_load_reg(rd, 1);
6316 tmp = tcg_temp_new_i32();
6317 tcg_gen_movi_i32(tmp, 0);
6319 tmp3 = neon_load_reg(rm, 1);
6320 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
6321 tcg_temp_free_i32(tmp5);
6322 tcg_temp_free_i32(tmp4);
6323 neon_store_reg(rd, 0, tmp2);
6324 neon_store_reg(rd, 1, tmp3);
6325 tcg_temp_free_i32(tmp);
6326 } else if ((insn & 0x380) == 0) {
6328 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6331 if (insn & (1 << 19)) {
6332 tmp = neon_load_reg(rm, 1);
6334 tmp = neon_load_reg(rm, 0);
6336 if (insn & (1 << 16)) {
6337 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6338 } else if (insn & (1 << 17)) {
6339 if ((insn >> 18) & 1)
6340 gen_neon_dup_high16(tmp);
6342 gen_neon_dup_low16(tmp);
6344 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6345 tmp2 = tcg_temp_new_i32();
6346 tcg_gen_mov_i32(tmp2, tmp);
6347 neon_store_reg(rd, pass, tmp2);
6349 tcg_temp_free_i32(tmp);
6358 static int disas_cp14_read(CPUARMState * env, DisasContext *s, uint32_t insn)
6360 int crn = (insn >> 16) & 0xf;
6361 int crm = insn & 0xf;
6362 int op1 = (insn >> 21) & 7;
6363 int op2 = (insn >> 5) & 7;
6364 int rt = (insn >> 12) & 0xf;
6367 /* Minimal set of debug registers, since we don't support debug */
6368 if (op1 == 0 && crn == 0 && op2 == 0) {
6371 /* DBGDIDR: just RAZ. In particular this means the
6372 * "debug architecture version" bits will read as
6373 * a reserved value, which should cause Linux to
6374 * not try to use the debug hardware.
6376 tmp = tcg_const_i32(0);
6377 store_reg(s, rt, tmp);
6381 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6382 * don't implement memory mapped debug components
6384 if (ENABLE_ARCH_7) {
6385 tmp = tcg_const_i32(0);
6386 store_reg(s, rt, tmp);
6395 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6396 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6400 tmp = load_cpu_field(teecr);
6401 store_reg(s, rt, tmp);
6404 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6406 if (IS_USER(s) && (env->teecr & 1))
6408 tmp = load_cpu_field(teehbr);
6409 store_reg(s, rt, tmp);
6416 static int disas_cp14_write(CPUARMState * env, DisasContext *s, uint32_t insn)
6418 int crn = (insn >> 16) & 0xf;
6419 int crm = insn & 0xf;
6420 int op1 = (insn >> 21) & 7;
6421 int op2 = (insn >> 5) & 7;
6422 int rt = (insn >> 12) & 0xf;
6425 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6426 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6430 tmp = load_reg(s, rt);
6431 gen_helper_set_teecr(cpu_env, tmp);
6432 tcg_temp_free_i32(tmp);
6435 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6437 if (IS_USER(s) && (env->teecr & 1))
6439 tmp = load_reg(s, rt);
6440 store_cpu_field(tmp, teehbr);
6447 static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
6449 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6450 const ARMCPRegInfo *ri;
6451 ARMCPU *cpu = arm_env_get_cpu(env);
6453 cpnum = (insn >> 8) & 0xf;
6454 if (arm_feature(env, ARM_FEATURE_XSCALE)
6455 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6458 /* First check for coprocessor space used for actual instructions */
6462 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6463 return disas_iwmmxt_insn(env, s, insn);
6464 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6465 return disas_dsp_insn(env, s, insn);
6470 return disas_vfp_insn (env, s, insn);
6475 /* Otherwise treat as a generic register access */
6476 is64 = (insn & (1 << 25)) == 0;
6477 if (!is64 && ((insn & (1 << 4)) == 0)) {
6485 opc1 = (insn >> 4) & 0xf;
6487 rt2 = (insn >> 16) & 0xf;
6489 crn = (insn >> 16) & 0xf;
6490 opc1 = (insn >> 21) & 7;
6491 opc2 = (insn >> 5) & 7;
6494 isread = (insn >> 20) & 1;
6495 rt = (insn >> 12) & 0xf;
6497 ri = get_arm_cp_reginfo(cpu,
6498 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6500 /* Check access permissions */
6501 if (!cp_access_ok(env, ri, isread)) {
6505 /* Handle special cases first */
6506 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6513 gen_set_pc_im(s->pc);
6514 s->is_jmp = DISAS_WFI;
6525 if (ri->type & ARM_CP_CONST) {
6526 tmp64 = tcg_const_i64(ri->resetvalue);
6527 } else if (ri->readfn) {
6529 gen_set_pc_im(s->pc);
6530 tmp64 = tcg_temp_new_i64();
6531 tmpptr = tcg_const_ptr(ri);
6532 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6533 tcg_temp_free_ptr(tmpptr);
6535 tmp64 = tcg_temp_new_i64();
6536 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6538 tmp = tcg_temp_new_i32();
6539 tcg_gen_trunc_i64_i32(tmp, tmp64);
6540 store_reg(s, rt, tmp);
6541 tcg_gen_shri_i64(tmp64, tmp64, 32);
6542 tcg_gen_trunc_i64_i32(tmp, tmp64);
6543 store_reg(s, rt2, tmp);
6546 if (ri->type & ARM_CP_CONST) {
6547 tmp = tcg_const_i32(ri->resetvalue);
6548 } else if (ri->readfn) {
6550 gen_set_pc_im(s->pc);
6551 tmp = tcg_temp_new_i32();
6552 tmpptr = tcg_const_ptr(ri);
6553 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6554 tcg_temp_free_ptr(tmpptr);
6556 tmp = load_cpu_offset(ri->fieldoffset);
6559 /* Destination register of r15 for 32 bit loads sets
6560 * the condition codes from the high 4 bits of the value
6563 tcg_temp_free_i32(tmp);
6565 store_reg(s, rt, tmp);
6570 if (ri->type & ARM_CP_CONST) {
6571 /* If not forbidden by access permissions, treat as WI */
6577 TCGv_i64 tmp64 = tcg_temp_new_i64();
6578 tmplo = load_reg(s, rt);
6579 tmphi = load_reg(s, rt2);
6580 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6581 tcg_temp_free_i32(tmplo);
6582 tcg_temp_free_i32(tmphi);
6584 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6585 gen_set_pc_im(s->pc);
6586 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6587 tcg_temp_free_ptr(tmpptr);
6589 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6591 tcg_temp_free_i64(tmp64);
6596 gen_set_pc_im(s->pc);
6597 tmp = load_reg(s, rt);
6598 tmpptr = tcg_const_ptr(ri);
6599 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6600 tcg_temp_free_ptr(tmpptr);
6601 tcg_temp_free_i32(tmp);
6603 TCGv tmp = load_reg(s, rt);
6604 store_cpu_offset(tmp, ri->fieldoffset);
6607 /* We default to ending the TB on a coprocessor register write,
6608 * but allow this to be suppressed by the register definition
6609 * (usually only necessary to work around guest bugs).
6611 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6618 /* Fallback code: handle coprocessor registers not yet converted
6623 if (insn & (1 << 20))
6624 return disas_cp14_read(env, s, insn);
6626 return disas_cp14_write(env, s, insn);
6628 return disas_cp15_insn (env, s, insn);
6635 /* Store a 64-bit value to a register pair. Clobbers val. */
6636 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6639 tmp = tcg_temp_new_i32();
6640 tcg_gen_trunc_i64_i32(tmp, val);
6641 store_reg(s, rlow, tmp);
6642 tmp = tcg_temp_new_i32();
6643 tcg_gen_shri_i64(val, val, 32);
6644 tcg_gen_trunc_i64_i32(tmp, val);
6645 store_reg(s, rhigh, tmp);
6648 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6649 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6654 /* Load value and extend to 64 bits. */
6655 tmp = tcg_temp_new_i64();
6656 tmp2 = load_reg(s, rlow);
6657 tcg_gen_extu_i32_i64(tmp, tmp2);
6658 tcg_temp_free_i32(tmp2);
6659 tcg_gen_add_i64(val, val, tmp);
6660 tcg_temp_free_i64(tmp);
6663 /* load and add a 64-bit value from a register pair. */
6664 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6670 /* Load 64-bit value rd:rn. */
6671 tmpl = load_reg(s, rlow);
6672 tmph = load_reg(s, rhigh);
6673 tmp = tcg_temp_new_i64();
6674 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6675 tcg_temp_free_i32(tmpl);
6676 tcg_temp_free_i32(tmph);
6677 tcg_gen_add_i64(val, val, tmp);
6678 tcg_temp_free_i64(tmp);
6681 /* Set N and Z flags from a 64-bit value. */
6682 static void gen_logicq_cc(TCGv_i64 val)
6684 TCGv tmp = tcg_temp_new_i32();
6685 gen_helper_logicq_cc(tmp, val);
6687 tcg_temp_free_i32(tmp);
6690 /* Load/Store exclusive instructions are implemented by remembering
6691 the value/address loaded, and seeing if these are the same
6692 when the store is performed. This should be is sufficient to implement
6693 the architecturally mandated semantics, and avoids having to monitor
6696 In system emulation mode only one CPU will be running at once, so
6697 this sequence is effectively atomic. In user emulation mode we
6698 throw an exception and handle the atomic operation elsewhere. */
6699 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6700 TCGv addr, int size)
6706 tmp = gen_ld8u(addr, IS_USER(s));
6709 tmp = gen_ld16u(addr, IS_USER(s));
6713 tmp = gen_ld32(addr, IS_USER(s));
6718 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6719 store_reg(s, rt, tmp);
6721 TCGv tmp2 = tcg_temp_new_i32();
6722 tcg_gen_addi_i32(tmp2, addr, 4);
6723 tmp = gen_ld32(tmp2, IS_USER(s));
6724 tcg_temp_free_i32(tmp2);
6725 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6726 store_reg(s, rt2, tmp);
6728 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6731 static void gen_clrex(DisasContext *s)
6733 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6736 #ifdef CONFIG_USER_ONLY
6737 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6738 TCGv addr, int size)
6740 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6741 tcg_gen_movi_i32(cpu_exclusive_info,
6742 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6743 gen_exception_insn(s, 4, EXCP_STREX);
6746 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6747 TCGv addr, int size)
6753 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6759 fail_label = gen_new_label();
6760 done_label = gen_new_label();
6761 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6764 tmp = gen_ld8u(addr, IS_USER(s));
6767 tmp = gen_ld16u(addr, IS_USER(s));
6771 tmp = gen_ld32(addr, IS_USER(s));
6776 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6777 tcg_temp_free_i32(tmp);
6779 TCGv tmp2 = tcg_temp_new_i32();
6780 tcg_gen_addi_i32(tmp2, addr, 4);
6781 tmp = gen_ld32(tmp2, IS_USER(s));
6782 tcg_temp_free_i32(tmp2);
6783 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6784 tcg_temp_free_i32(tmp);
6786 tmp = load_reg(s, rt);
6789 gen_st8(tmp, addr, IS_USER(s));
6792 gen_st16(tmp, addr, IS_USER(s));
6796 gen_st32(tmp, addr, IS_USER(s));
6802 tcg_gen_addi_i32(addr, addr, 4);
6803 tmp = load_reg(s, rt2);
6804 gen_st32(tmp, addr, IS_USER(s));
6806 tcg_gen_movi_i32(cpu_R[rd], 0);
6807 tcg_gen_br(done_label);
6808 gen_set_label(fail_label);
6809 tcg_gen_movi_i32(cpu_R[rd], 1);
6810 gen_set_label(done_label);
6811 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6815 static void disas_arm_insn(CPUARMState * env, DisasContext *s)
6817 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6824 insn = arm_ldl_code(s->pc, s->bswap_code);
6827 /* M variants do not implement ARM mode. */
6832 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6833 * choose to UNDEF. In ARMv5 and above the space is used
6834 * for miscellaneous unconditional instructions.
6838 /* Unconditional instructions. */
6839 if (((insn >> 25) & 7) == 1) {
6840 /* NEON Data processing. */
6841 if (!arm_feature(env, ARM_FEATURE_NEON))
6844 if (disas_neon_data_insn(env, s, insn))
6848 if ((insn & 0x0f100000) == 0x04000000) {
6849 /* NEON load/store. */
6850 if (!arm_feature(env, ARM_FEATURE_NEON))
6853 if (disas_neon_ls_insn(env, s, insn))
6857 if (((insn & 0x0f30f000) == 0x0510f000) ||
6858 ((insn & 0x0f30f010) == 0x0710f000)) {
6859 if ((insn & (1 << 22)) == 0) {
6861 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6865 /* Otherwise PLD; v5TE+ */
6869 if (((insn & 0x0f70f000) == 0x0450f000) ||
6870 ((insn & 0x0f70f010) == 0x0650f000)) {
6872 return; /* PLI; V7 */
6874 if (((insn & 0x0f700000) == 0x04100000) ||
6875 ((insn & 0x0f700010) == 0x06100000)) {
6876 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6879 return; /* v7MP: Unallocated memory hint: must NOP */
6882 if ((insn & 0x0ffffdff) == 0x01010000) {
6885 if (((insn >> 9) & 1) != s->bswap_code) {
6886 /* Dynamic endianness switching not implemented. */
6890 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6891 switch ((insn >> 4) & 0xf) {
6900 /* We don't emulate caches so these are a no-op. */
6905 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6911 op1 = (insn & 0x1f);
6912 addr = tcg_temp_new_i32();
6913 tmp = tcg_const_i32(op1);
6914 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6915 tcg_temp_free_i32(tmp);
6916 i = (insn >> 23) & 3;
6918 case 0: offset = -4; break; /* DA */
6919 case 1: offset = 0; break; /* IA */
6920 case 2: offset = -8; break; /* DB */
6921 case 3: offset = 4; break; /* IB */
6925 tcg_gen_addi_i32(addr, addr, offset);
6926 tmp = load_reg(s, 14);
6927 gen_st32(tmp, addr, 0);
6928 tmp = load_cpu_field(spsr);
6929 tcg_gen_addi_i32(addr, addr, 4);
6930 gen_st32(tmp, addr, 0);
6931 if (insn & (1 << 21)) {
6932 /* Base writeback. */
6934 case 0: offset = -8; break;
6935 case 1: offset = 4; break;
6936 case 2: offset = -4; break;
6937 case 3: offset = 0; break;
6941 tcg_gen_addi_i32(addr, addr, offset);
6942 tmp = tcg_const_i32(op1);
6943 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6944 tcg_temp_free_i32(tmp);
6945 tcg_temp_free_i32(addr);
6947 tcg_temp_free_i32(addr);
6950 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6956 rn = (insn >> 16) & 0xf;
6957 addr = load_reg(s, rn);
6958 i = (insn >> 23) & 3;
6960 case 0: offset = -4; break; /* DA */
6961 case 1: offset = 0; break; /* IA */
6962 case 2: offset = -8; break; /* DB */
6963 case 3: offset = 4; break; /* IB */
6967 tcg_gen_addi_i32(addr, addr, offset);
6968 /* Load PC into tmp and CPSR into tmp2. */
6969 tmp = gen_ld32(addr, 0);
6970 tcg_gen_addi_i32(addr, addr, 4);
6971 tmp2 = gen_ld32(addr, 0);
6972 if (insn & (1 << 21)) {
6973 /* Base writeback. */
6975 case 0: offset = -8; break;
6976 case 1: offset = 4; break;
6977 case 2: offset = -4; break;
6978 case 3: offset = 0; break;
6982 tcg_gen_addi_i32(addr, addr, offset);
6983 store_reg(s, rn, addr);
6985 tcg_temp_free_i32(addr);
6987 gen_rfe(s, tmp, tmp2);
6989 } else if ((insn & 0x0e000000) == 0x0a000000) {
6990 /* branch link and change to thumb (blx <offset>) */
6993 val = (uint32_t)s->pc;
6994 tmp = tcg_temp_new_i32();
6995 tcg_gen_movi_i32(tmp, val);
6996 store_reg(s, 14, tmp);
6997 /* Sign-extend the 24-bit offset */
6998 offset = (((int32_t)insn) << 8) >> 8;
6999 /* offset * 4 + bit24 * 2 + (thumb bit) */
7000 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7001 /* pipeline offset */
7003 /* protected by ARCH(5); above, near the start of uncond block */
7006 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7007 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7008 /* iWMMXt register transfer. */
7009 if (env->cp15.c15_cpar & (1 << 1))
7010 if (!disas_iwmmxt_insn(env, s, insn))
7013 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7014 /* Coprocessor double register transfer. */
7016 } else if ((insn & 0x0f000010) == 0x0e000010) {
7017 /* Additional coprocessor register transfer. */
7018 } else if ((insn & 0x0ff10020) == 0x01000000) {
7021 /* cps (privileged) */
7025 if (insn & (1 << 19)) {
7026 if (insn & (1 << 8))
7028 if (insn & (1 << 7))
7030 if (insn & (1 << 6))
7032 if (insn & (1 << 18))
7035 if (insn & (1 << 17)) {
7037 val |= (insn & 0x1f);
7040 gen_set_psr_im(s, mask, 0, val);
7047 /* if not always execute, we generate a conditional jump to
7049 s->condlabel = gen_new_label();
7050 gen_test_cc(cond ^ 1, s->condlabel);
7053 if ((insn & 0x0f900000) == 0x03000000) {
7054 if ((insn & (1 << 21)) == 0) {
7056 rd = (insn >> 12) & 0xf;
7057 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7058 if ((insn & (1 << 22)) == 0) {
7060 tmp = tcg_temp_new_i32();
7061 tcg_gen_movi_i32(tmp, val);
7064 tmp = load_reg(s, rd);
7065 tcg_gen_ext16u_i32(tmp, tmp);
7066 tcg_gen_ori_i32(tmp, tmp, val << 16);
7068 store_reg(s, rd, tmp);
7070 if (((insn >> 12) & 0xf) != 0xf)
7072 if (((insn >> 16) & 0xf) == 0) {
7073 gen_nop_hint(s, insn & 0xff);
7075 /* CPSR = immediate */
7077 shift = ((insn >> 8) & 0xf) * 2;
7079 val = (val >> shift) | (val << (32 - shift));
7080 i = ((insn & (1 << 22)) != 0);
7081 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
7085 } else if ((insn & 0x0f900000) == 0x01000000
7086 && (insn & 0x00000090) != 0x00000090) {
7087 /* miscellaneous instructions */
7088 op1 = (insn >> 21) & 3;
7089 sh = (insn >> 4) & 0xf;
7092 case 0x0: /* move program status register */
7095 tmp = load_reg(s, rm);
7096 i = ((op1 & 2) != 0);
7097 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
7101 rd = (insn >> 12) & 0xf;
7105 tmp = load_cpu_field(spsr);
7107 tmp = tcg_temp_new_i32();
7108 gen_helper_cpsr_read(tmp);
7110 store_reg(s, rd, tmp);
7115 /* branch/exchange thumb (bx). */
7117 tmp = load_reg(s, rm);
7119 } else if (op1 == 3) {
7122 rd = (insn >> 12) & 0xf;
7123 tmp = load_reg(s, rm);
7124 gen_helper_clz(tmp, tmp);
7125 store_reg(s, rd, tmp);
7133 /* Trivial implementation equivalent to bx. */
7134 tmp = load_reg(s, rm);
7145 /* branch link/exchange thumb (blx) */
7146 tmp = load_reg(s, rm);
7147 tmp2 = tcg_temp_new_i32();
7148 tcg_gen_movi_i32(tmp2, s->pc);
7149 store_reg(s, 14, tmp2);
7152 case 0x5: /* saturating add/subtract */
7154 rd = (insn >> 12) & 0xf;
7155 rn = (insn >> 16) & 0xf;
7156 tmp = load_reg(s, rm);
7157 tmp2 = load_reg(s, rn);
7159 gen_helper_double_saturate(tmp2, tmp2);
7161 gen_helper_sub_saturate(tmp, tmp, tmp2);
7163 gen_helper_add_saturate(tmp, tmp, tmp2);
7164 tcg_temp_free_i32(tmp2);
7165 store_reg(s, rd, tmp);
7168 /* SMC instruction (op1 == 3)
7169 and undefined instructions (op1 == 0 || op1 == 2)
7176 gen_exception_insn(s, 4, EXCP_BKPT);
7178 case 0x8: /* signed multiply */
7183 rs = (insn >> 8) & 0xf;
7184 rn = (insn >> 12) & 0xf;
7185 rd = (insn >> 16) & 0xf;
7187 /* (32 * 16) >> 16 */
7188 tmp = load_reg(s, rm);
7189 tmp2 = load_reg(s, rs);
7191 tcg_gen_sari_i32(tmp2, tmp2, 16);
7194 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7195 tcg_gen_shri_i64(tmp64, tmp64, 16);
7196 tmp = tcg_temp_new_i32();
7197 tcg_gen_trunc_i64_i32(tmp, tmp64);
7198 tcg_temp_free_i64(tmp64);
7199 if ((sh & 2) == 0) {
7200 tmp2 = load_reg(s, rn);
7201 gen_helper_add_setq(tmp, tmp, tmp2);
7202 tcg_temp_free_i32(tmp2);
7204 store_reg(s, rd, tmp);
7207 tmp = load_reg(s, rm);
7208 tmp2 = load_reg(s, rs);
7209 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7210 tcg_temp_free_i32(tmp2);
7212 tmp64 = tcg_temp_new_i64();
7213 tcg_gen_ext_i32_i64(tmp64, tmp);
7214 tcg_temp_free_i32(tmp);
7215 gen_addq(s, tmp64, rn, rd);
7216 gen_storeq_reg(s, rn, rd, tmp64);
7217 tcg_temp_free_i64(tmp64);
7220 tmp2 = load_reg(s, rn);
7221 gen_helper_add_setq(tmp, tmp, tmp2);
7222 tcg_temp_free_i32(tmp2);
7224 store_reg(s, rd, tmp);
7231 } else if (((insn & 0x0e000000) == 0 &&
7232 (insn & 0x00000090) != 0x90) ||
7233 ((insn & 0x0e000000) == (1 << 25))) {
7234 int set_cc, logic_cc, shiftop;
7236 op1 = (insn >> 21) & 0xf;
7237 set_cc = (insn >> 20) & 1;
7238 logic_cc = table_logic_cc[op1] & set_cc;
7240 /* data processing instruction */
7241 if (insn & (1 << 25)) {
7242 /* immediate operand */
7244 shift = ((insn >> 8) & 0xf) * 2;
7246 val = (val >> shift) | (val << (32 - shift));
7248 tmp2 = tcg_temp_new_i32();
7249 tcg_gen_movi_i32(tmp2, val);
7250 if (logic_cc && shift) {
7251 gen_set_CF_bit31(tmp2);
7256 tmp2 = load_reg(s, rm);
7257 shiftop = (insn >> 5) & 3;
7258 if (!(insn & (1 << 4))) {
7259 shift = (insn >> 7) & 0x1f;
7260 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7262 rs = (insn >> 8) & 0xf;
7263 tmp = load_reg(s, rs);
7264 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7267 if (op1 != 0x0f && op1 != 0x0d) {
7268 rn = (insn >> 16) & 0xf;
7269 tmp = load_reg(s, rn);
7273 rd = (insn >> 12) & 0xf;
7276 tcg_gen_and_i32(tmp, tmp, tmp2);
7280 store_reg_bx(env, s, rd, tmp);
7283 tcg_gen_xor_i32(tmp, tmp, tmp2);
7287 store_reg_bx(env, s, rd, tmp);
7290 if (set_cc && rd == 15) {
7291 /* SUBS r15, ... is used for exception return. */
7295 gen_helper_sub_cc(tmp, tmp, tmp2);
7296 gen_exception_return(s, tmp);
7299 gen_helper_sub_cc(tmp, tmp, tmp2);
7301 tcg_gen_sub_i32(tmp, tmp, tmp2);
7303 store_reg_bx(env, s, rd, tmp);
7308 gen_helper_sub_cc(tmp, tmp2, tmp);
7310 tcg_gen_sub_i32(tmp, tmp2, tmp);
7312 store_reg_bx(env, s, rd, tmp);
7316 gen_helper_add_cc(tmp, tmp, tmp2);
7318 tcg_gen_add_i32(tmp, tmp, tmp2);
7320 store_reg_bx(env, s, rd, tmp);
7324 gen_helper_adc_cc(tmp, tmp, tmp2);
7326 gen_add_carry(tmp, tmp, tmp2);
7328 store_reg_bx(env, s, rd, tmp);
7332 gen_helper_sbc_cc(tmp, tmp, tmp2);
7334 gen_sub_carry(tmp, tmp, tmp2);
7336 store_reg_bx(env, s, rd, tmp);
7340 gen_helper_sbc_cc(tmp, tmp2, tmp);
7342 gen_sub_carry(tmp, tmp2, tmp);
7344 store_reg_bx(env, s, rd, tmp);
7348 tcg_gen_and_i32(tmp, tmp, tmp2);
7351 tcg_temp_free_i32(tmp);
7355 tcg_gen_xor_i32(tmp, tmp, tmp2);
7358 tcg_temp_free_i32(tmp);
7362 gen_helper_sub_cc(tmp, tmp, tmp2);
7364 tcg_temp_free_i32(tmp);
7368 gen_helper_add_cc(tmp, tmp, tmp2);
7370 tcg_temp_free_i32(tmp);
7373 tcg_gen_or_i32(tmp, tmp, tmp2);
7377 store_reg_bx(env, s, rd, tmp);
7380 if (logic_cc && rd == 15) {
7381 /* MOVS r15, ... is used for exception return. */
7385 gen_exception_return(s, tmp2);
7390 store_reg_bx(env, s, rd, tmp2);
7394 tcg_gen_andc_i32(tmp, tmp, tmp2);
7398 store_reg_bx(env, s, rd, tmp);
7402 tcg_gen_not_i32(tmp2, tmp2);
7406 store_reg_bx(env, s, rd, tmp2);
7409 if (op1 != 0x0f && op1 != 0x0d) {
7410 tcg_temp_free_i32(tmp2);
7413 /* other instructions */
7414 op1 = (insn >> 24) & 0xf;
7418 /* multiplies, extra load/stores */
7419 sh = (insn >> 5) & 3;
7422 rd = (insn >> 16) & 0xf;
7423 rn = (insn >> 12) & 0xf;
7424 rs = (insn >> 8) & 0xf;
7426 op1 = (insn >> 20) & 0xf;
7428 case 0: case 1: case 2: case 3: case 6:
7430 tmp = load_reg(s, rs);
7431 tmp2 = load_reg(s, rm);
7432 tcg_gen_mul_i32(tmp, tmp, tmp2);
7433 tcg_temp_free_i32(tmp2);
7434 if (insn & (1 << 22)) {
7435 /* Subtract (mls) */
7437 tmp2 = load_reg(s, rn);
7438 tcg_gen_sub_i32(tmp, tmp2, tmp);
7439 tcg_temp_free_i32(tmp2);
7440 } else if (insn & (1 << 21)) {
7442 tmp2 = load_reg(s, rn);
7443 tcg_gen_add_i32(tmp, tmp, tmp2);
7444 tcg_temp_free_i32(tmp2);
7446 if (insn & (1 << 20))
7448 store_reg(s, rd, tmp);
7451 /* 64 bit mul double accumulate (UMAAL) */
7453 tmp = load_reg(s, rs);
7454 tmp2 = load_reg(s, rm);
7455 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7456 gen_addq_lo(s, tmp64, rn);
7457 gen_addq_lo(s, tmp64, rd);
7458 gen_storeq_reg(s, rn, rd, tmp64);
7459 tcg_temp_free_i64(tmp64);
7461 case 8: case 9: case 10: case 11:
7462 case 12: case 13: case 14: case 15:
7463 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7464 tmp = load_reg(s, rs);
7465 tmp2 = load_reg(s, rm);
7466 if (insn & (1 << 22)) {
7467 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7469 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7471 if (insn & (1 << 21)) { /* mult accumulate */
7472 gen_addq(s, tmp64, rn, rd);
7474 if (insn & (1 << 20)) {
7475 gen_logicq_cc(tmp64);
7477 gen_storeq_reg(s, rn, rd, tmp64);
7478 tcg_temp_free_i64(tmp64);
7484 rn = (insn >> 16) & 0xf;
7485 rd = (insn >> 12) & 0xf;
7486 if (insn & (1 << 23)) {
7487 /* load/store exclusive */
7488 op1 = (insn >> 21) & 0x3;
7493 addr = tcg_temp_local_new_i32();
7494 load_reg_var(s, addr, rn);
7495 if (insn & (1 << 20)) {
7498 gen_load_exclusive(s, rd, 15, addr, 2);
7500 case 1: /* ldrexd */
7501 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7503 case 2: /* ldrexb */
7504 gen_load_exclusive(s, rd, 15, addr, 0);
7506 case 3: /* ldrexh */
7507 gen_load_exclusive(s, rd, 15, addr, 1);
7516 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7518 case 1: /* strexd */
7519 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7521 case 2: /* strexb */
7522 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7524 case 3: /* strexh */
7525 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7531 tcg_temp_free(addr);
7533 /* SWP instruction */
7536 /* ??? This is not really atomic. However we know
7537 we never have multiple CPUs running in parallel,
7538 so it is good enough. */
7539 addr = load_reg(s, rn);
7540 tmp = load_reg(s, rm);
7541 if (insn & (1 << 22)) {
7542 tmp2 = gen_ld8u(addr, IS_USER(s));
7543 gen_st8(tmp, addr, IS_USER(s));
7545 tmp2 = gen_ld32(addr, IS_USER(s));
7546 gen_st32(tmp, addr, IS_USER(s));
7548 tcg_temp_free_i32(addr);
7549 store_reg(s, rd, tmp2);
7555 /* Misc load/store */
7556 rn = (insn >> 16) & 0xf;
7557 rd = (insn >> 12) & 0xf;
7558 addr = load_reg(s, rn);
7559 if (insn & (1 << 24))
7560 gen_add_datah_offset(s, insn, 0, addr);
7562 if (insn & (1 << 20)) {
7566 tmp = gen_ld16u(addr, IS_USER(s));
7569 tmp = gen_ld8s(addr, IS_USER(s));
7573 tmp = gen_ld16s(addr, IS_USER(s));
7577 } else if (sh & 2) {
7582 tmp = load_reg(s, rd);
7583 gen_st32(tmp, addr, IS_USER(s));
7584 tcg_gen_addi_i32(addr, addr, 4);
7585 tmp = load_reg(s, rd + 1);
7586 gen_st32(tmp, addr, IS_USER(s));
7590 tmp = gen_ld32(addr, IS_USER(s));
7591 store_reg(s, rd, tmp);
7592 tcg_gen_addi_i32(addr, addr, 4);
7593 tmp = gen_ld32(addr, IS_USER(s));
7597 address_offset = -4;
7600 tmp = load_reg(s, rd);
7601 gen_st16(tmp, addr, IS_USER(s));
7604 /* Perform base writeback before the loaded value to
7605 ensure correct behavior with overlapping index registers.
7606 ldrd with base writeback is is undefined if the
7607 destination and index registers overlap. */
7608 if (!(insn & (1 << 24))) {
7609 gen_add_datah_offset(s, insn, address_offset, addr);
7610 store_reg(s, rn, addr);
7611 } else if (insn & (1 << 21)) {
7613 tcg_gen_addi_i32(addr, addr, address_offset);
7614 store_reg(s, rn, addr);
7616 tcg_temp_free_i32(addr);
7619 /* Complete the load. */
7620 store_reg(s, rd, tmp);
7629 if (insn & (1 << 4)) {
7631 /* Armv6 Media instructions. */
7633 rn = (insn >> 16) & 0xf;
7634 rd = (insn >> 12) & 0xf;
7635 rs = (insn >> 8) & 0xf;
7636 switch ((insn >> 23) & 3) {
7637 case 0: /* Parallel add/subtract. */
7638 op1 = (insn >> 20) & 7;
7639 tmp = load_reg(s, rn);
7640 tmp2 = load_reg(s, rm);
7641 sh = (insn >> 5) & 7;
7642 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7644 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7645 tcg_temp_free_i32(tmp2);
7646 store_reg(s, rd, tmp);
7649 if ((insn & 0x00700020) == 0) {
7650 /* Halfword pack. */
7651 tmp = load_reg(s, rn);
7652 tmp2 = load_reg(s, rm);
7653 shift = (insn >> 7) & 0x1f;
7654 if (insn & (1 << 6)) {
7658 tcg_gen_sari_i32(tmp2, tmp2, shift);
7659 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7660 tcg_gen_ext16u_i32(tmp2, tmp2);
7664 tcg_gen_shli_i32(tmp2, tmp2, shift);
7665 tcg_gen_ext16u_i32(tmp, tmp);
7666 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7668 tcg_gen_or_i32(tmp, tmp, tmp2);
7669 tcg_temp_free_i32(tmp2);
7670 store_reg(s, rd, tmp);
7671 } else if ((insn & 0x00200020) == 0x00200000) {
7673 tmp = load_reg(s, rm);
7674 shift = (insn >> 7) & 0x1f;
7675 if (insn & (1 << 6)) {
7678 tcg_gen_sari_i32(tmp, tmp, shift);
7680 tcg_gen_shli_i32(tmp, tmp, shift);
7682 sh = (insn >> 16) & 0x1f;
7683 tmp2 = tcg_const_i32(sh);
7684 if (insn & (1 << 22))
7685 gen_helper_usat(tmp, tmp, tmp2);
7687 gen_helper_ssat(tmp, tmp, tmp2);
7688 tcg_temp_free_i32(tmp2);
7689 store_reg(s, rd, tmp);
7690 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7692 tmp = load_reg(s, rm);
7693 sh = (insn >> 16) & 0x1f;
7694 tmp2 = tcg_const_i32(sh);
7695 if (insn & (1 << 22))
7696 gen_helper_usat16(tmp, tmp, tmp2);
7698 gen_helper_ssat16(tmp, tmp, tmp2);
7699 tcg_temp_free_i32(tmp2);
7700 store_reg(s, rd, tmp);
7701 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7703 tmp = load_reg(s, rn);
7704 tmp2 = load_reg(s, rm);
7705 tmp3 = tcg_temp_new_i32();
7706 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
7707 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7708 tcg_temp_free_i32(tmp3);
7709 tcg_temp_free_i32(tmp2);
7710 store_reg(s, rd, tmp);
7711 } else if ((insn & 0x000003e0) == 0x00000060) {
7712 tmp = load_reg(s, rm);
7713 shift = (insn >> 10) & 3;
7714 /* ??? In many cases it's not necessary to do a
7715 rotate, a shift is sufficient. */
7717 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7718 op1 = (insn >> 20) & 7;
7720 case 0: gen_sxtb16(tmp); break;
7721 case 2: gen_sxtb(tmp); break;
7722 case 3: gen_sxth(tmp); break;
7723 case 4: gen_uxtb16(tmp); break;
7724 case 6: gen_uxtb(tmp); break;
7725 case 7: gen_uxth(tmp); break;
7726 default: goto illegal_op;
7729 tmp2 = load_reg(s, rn);
7730 if ((op1 & 3) == 0) {
7731 gen_add16(tmp, tmp2);
7733 tcg_gen_add_i32(tmp, tmp, tmp2);
7734 tcg_temp_free_i32(tmp2);
7737 store_reg(s, rd, tmp);
7738 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7740 tmp = load_reg(s, rm);
7741 if (insn & (1 << 22)) {
7742 if (insn & (1 << 7)) {
7746 gen_helper_rbit(tmp, tmp);
7749 if (insn & (1 << 7))
7752 tcg_gen_bswap32_i32(tmp, tmp);
7754 store_reg(s, rd, tmp);
7759 case 2: /* Multiplies (Type 3). */
7760 switch ((insn >> 20) & 0x7) {
7762 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7763 /* op2 not 00x or 11x : UNDEF */
7766 /* Signed multiply most significant [accumulate].
7767 (SMMUL, SMMLA, SMMLS) */
7768 tmp = load_reg(s, rm);
7769 tmp2 = load_reg(s, rs);
7770 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7773 tmp = load_reg(s, rd);
7774 if (insn & (1 << 6)) {
7775 tmp64 = gen_subq_msw(tmp64, tmp);
7777 tmp64 = gen_addq_msw(tmp64, tmp);
7780 if (insn & (1 << 5)) {
7781 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7783 tcg_gen_shri_i64(tmp64, tmp64, 32);
7784 tmp = tcg_temp_new_i32();
7785 tcg_gen_trunc_i64_i32(tmp, tmp64);
7786 tcg_temp_free_i64(tmp64);
7787 store_reg(s, rn, tmp);
7791 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7792 if (insn & (1 << 7)) {
7795 tmp = load_reg(s, rm);
7796 tmp2 = load_reg(s, rs);
7797 if (insn & (1 << 5))
7798 gen_swap_half(tmp2);
7799 gen_smul_dual(tmp, tmp2);
7800 if (insn & (1 << 6)) {
7801 /* This subtraction cannot overflow. */
7802 tcg_gen_sub_i32(tmp, tmp, tmp2);
7804 /* This addition cannot overflow 32 bits;
7805 * however it may overflow considered as a signed
7806 * operation, in which case we must set the Q flag.
7808 gen_helper_add_setq(tmp, tmp, tmp2);
7810 tcg_temp_free_i32(tmp2);
7811 if (insn & (1 << 22)) {
7812 /* smlald, smlsld */
7813 tmp64 = tcg_temp_new_i64();
7814 tcg_gen_ext_i32_i64(tmp64, tmp);
7815 tcg_temp_free_i32(tmp);
7816 gen_addq(s, tmp64, rd, rn);
7817 gen_storeq_reg(s, rd, rn, tmp64);
7818 tcg_temp_free_i64(tmp64);
7820 /* smuad, smusd, smlad, smlsd */
7823 tmp2 = load_reg(s, rd);
7824 gen_helper_add_setq(tmp, tmp, tmp2);
7825 tcg_temp_free_i32(tmp2);
7827 store_reg(s, rn, tmp);
7833 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7836 if (((insn >> 5) & 7) || (rd != 15)) {
7839 tmp = load_reg(s, rm);
7840 tmp2 = load_reg(s, rs);
7841 if (insn & (1 << 21)) {
7842 gen_helper_udiv(tmp, tmp, tmp2);
7844 gen_helper_sdiv(tmp, tmp, tmp2);
7846 tcg_temp_free_i32(tmp2);
7847 store_reg(s, rn, tmp);
7854 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7856 case 0: /* Unsigned sum of absolute differences. */
7858 tmp = load_reg(s, rm);
7859 tmp2 = load_reg(s, rs);
7860 gen_helper_usad8(tmp, tmp, tmp2);
7861 tcg_temp_free_i32(tmp2);
7863 tmp2 = load_reg(s, rd);
7864 tcg_gen_add_i32(tmp, tmp, tmp2);
7865 tcg_temp_free_i32(tmp2);
7867 store_reg(s, rn, tmp);
7869 case 0x20: case 0x24: case 0x28: case 0x2c:
7870 /* Bitfield insert/clear. */
7872 shift = (insn >> 7) & 0x1f;
7873 i = (insn >> 16) & 0x1f;
7876 tmp = tcg_temp_new_i32();
7877 tcg_gen_movi_i32(tmp, 0);
7879 tmp = load_reg(s, rm);
7882 tmp2 = load_reg(s, rd);
7883 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7884 tcg_temp_free_i32(tmp2);
7886 store_reg(s, rd, tmp);
7888 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7889 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7891 tmp = load_reg(s, rm);
7892 shift = (insn >> 7) & 0x1f;
7893 i = ((insn >> 16) & 0x1f) + 1;
7898 gen_ubfx(tmp, shift, (1u << i) - 1);
7900 gen_sbfx(tmp, shift, i);
7903 store_reg(s, rd, tmp);
7913 /* Check for undefined extension instructions
7914 * per the ARM Bible IE:
7915 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7917 sh = (0xf << 20) | (0xf << 4);
7918 if (op1 == 0x7 && ((insn & sh) == sh))
7922 /* load/store byte/word */
7923 rn = (insn >> 16) & 0xf;
7924 rd = (insn >> 12) & 0xf;
7925 tmp2 = load_reg(s, rn);
7926 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7927 if (insn & (1 << 24))
7928 gen_add_data_offset(s, insn, tmp2);
7929 if (insn & (1 << 20)) {
7931 if (insn & (1 << 22)) {
7932 tmp = gen_ld8u(tmp2, i);
7934 tmp = gen_ld32(tmp2, i);
7938 tmp = load_reg(s, rd);
7939 if (insn & (1 << 22))
7940 gen_st8(tmp, tmp2, i);
7942 gen_st32(tmp, tmp2, i);
7944 if (!(insn & (1 << 24))) {
7945 gen_add_data_offset(s, insn, tmp2);
7946 store_reg(s, rn, tmp2);
7947 } else if (insn & (1 << 21)) {
7948 store_reg(s, rn, tmp2);
7950 tcg_temp_free_i32(tmp2);
7952 if (insn & (1 << 20)) {
7953 /* Complete the load. */
7954 store_reg_from_load(env, s, rd, tmp);
7960 int j, n, user, loaded_base;
7962 /* load/store multiple words */
7963 /* XXX: store correct base if write back */
7965 if (insn & (1 << 22)) {
7967 goto illegal_op; /* only usable in supervisor mode */
7969 if ((insn & (1 << 15)) == 0)
7972 rn = (insn >> 16) & 0xf;
7973 addr = load_reg(s, rn);
7975 /* compute total size */
7977 TCGV_UNUSED(loaded_var);
7980 if (insn & (1 << i))
7983 /* XXX: test invalid n == 0 case ? */
7984 if (insn & (1 << 23)) {
7985 if (insn & (1 << 24)) {
7987 tcg_gen_addi_i32(addr, addr, 4);
7989 /* post increment */
7992 if (insn & (1 << 24)) {
7994 tcg_gen_addi_i32(addr, addr, -(n * 4));
7996 /* post decrement */
7998 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8003 if (insn & (1 << i)) {
8004 if (insn & (1 << 20)) {
8006 tmp = gen_ld32(addr, IS_USER(s));
8008 tmp2 = tcg_const_i32(i);
8009 gen_helper_set_user_reg(tmp2, tmp);
8010 tcg_temp_free_i32(tmp2);
8011 tcg_temp_free_i32(tmp);
8012 } else if (i == rn) {
8016 store_reg_from_load(env, s, i, tmp);
8021 /* special case: r15 = PC + 8 */
8022 val = (long)s->pc + 4;
8023 tmp = tcg_temp_new_i32();
8024 tcg_gen_movi_i32(tmp, val);
8026 tmp = tcg_temp_new_i32();
8027 tmp2 = tcg_const_i32(i);
8028 gen_helper_get_user_reg(tmp, tmp2);
8029 tcg_temp_free_i32(tmp2);
8031 tmp = load_reg(s, i);
8033 gen_st32(tmp, addr, IS_USER(s));
8036 /* no need to add after the last transfer */
8038 tcg_gen_addi_i32(addr, addr, 4);
8041 if (insn & (1 << 21)) {
8043 if (insn & (1 << 23)) {
8044 if (insn & (1 << 24)) {
8047 /* post increment */
8048 tcg_gen_addi_i32(addr, addr, 4);
8051 if (insn & (1 << 24)) {
8054 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8056 /* post decrement */
8057 tcg_gen_addi_i32(addr, addr, -(n * 4));
8060 store_reg(s, rn, addr);
8062 tcg_temp_free_i32(addr);
8065 store_reg(s, rn, loaded_var);
8067 if ((insn & (1 << 22)) && !user) {
8068 /* Restore CPSR from SPSR. */
8069 tmp = load_cpu_field(spsr);
8070 gen_set_cpsr(tmp, 0xffffffff);
8071 tcg_temp_free_i32(tmp);
8072 s->is_jmp = DISAS_UPDATE;
8081 /* branch (and link) */
8082 val = (int32_t)s->pc;
8083 if (insn & (1 << 24)) {
8084 tmp = tcg_temp_new_i32();
8085 tcg_gen_movi_i32(tmp, val);
8086 store_reg(s, 14, tmp);
8088 offset = (((int32_t)insn << 8) >> 8);
8089 val += (offset << 2) + 4;
8097 if (disas_coproc_insn(env, s, insn))
8102 gen_set_pc_im(s->pc);
8103 s->is_jmp = DISAS_SWI;
8107 gen_exception_insn(s, 4, EXCP_UDEF);
8113 /* Return true if this is a Thumb-2 logical op. */
8115 thumb2_logic_op(int op)
8120 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8121 then set condition code flags based on the result of the operation.
8122 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8123 to the high bit of T1.
8124 Returns zero if the opcode is valid. */
8127 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
8134 tcg_gen_and_i32(t0, t0, t1);
8138 tcg_gen_andc_i32(t0, t0, t1);
8142 tcg_gen_or_i32(t0, t0, t1);
8146 tcg_gen_orc_i32(t0, t0, t1);
8150 tcg_gen_xor_i32(t0, t0, t1);
8155 gen_helper_add_cc(t0, t0, t1);
8157 tcg_gen_add_i32(t0, t0, t1);
8161 gen_helper_adc_cc(t0, t0, t1);
8167 gen_helper_sbc_cc(t0, t0, t1);
8169 gen_sub_carry(t0, t0, t1);
8173 gen_helper_sub_cc(t0, t0, t1);
8175 tcg_gen_sub_i32(t0, t0, t1);
8179 gen_helper_sub_cc(t0, t1, t0);
8181 tcg_gen_sub_i32(t0, t1, t0);
8183 default: /* 5, 6, 7, 9, 12, 15. */
8189 gen_set_CF_bit31(t1);
8194 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8196 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
8198 uint32_t insn, imm, shift, offset;
8199 uint32_t rd, rn, rm, rs;
8210 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8211 || arm_feature (env, ARM_FEATURE_M))) {
8212 /* Thumb-1 cores may need to treat bl and blx as a pair of
8213 16-bit instructions to get correct prefetch abort behavior. */
8215 if ((insn & (1 << 12)) == 0) {
8217 /* Second half of blx. */
8218 offset = ((insn & 0x7ff) << 1);
8219 tmp = load_reg(s, 14);
8220 tcg_gen_addi_i32(tmp, tmp, offset);
8221 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
8223 tmp2 = tcg_temp_new_i32();
8224 tcg_gen_movi_i32(tmp2, s->pc | 1);
8225 store_reg(s, 14, tmp2);
8229 if (insn & (1 << 11)) {
8230 /* Second half of bl. */
8231 offset = ((insn & 0x7ff) << 1) | 1;
8232 tmp = load_reg(s, 14);
8233 tcg_gen_addi_i32(tmp, tmp, offset);
8235 tmp2 = tcg_temp_new_i32();
8236 tcg_gen_movi_i32(tmp2, s->pc | 1);
8237 store_reg(s, 14, tmp2);
8241 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8242 /* Instruction spans a page boundary. Implement it as two
8243 16-bit instructions in case the second half causes an
8245 offset = ((int32_t)insn << 21) >> 9;
8246 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
8249 /* Fall through to 32-bit decode. */
8252 insn = arm_lduw_code(s->pc, s->bswap_code);
8254 insn |= (uint32_t)insn_hw1 << 16;
8256 if ((insn & 0xf800e800) != 0xf000e800) {
8260 rn = (insn >> 16) & 0xf;
8261 rs = (insn >> 12) & 0xf;
8262 rd = (insn >> 8) & 0xf;
8264 switch ((insn >> 25) & 0xf) {
8265 case 0: case 1: case 2: case 3:
8266 /* 16-bit instructions. Should never happen. */
8269 if (insn & (1 << 22)) {
8270 /* Other load/store, table branch. */
8271 if (insn & 0x01200000) {
8272 /* Load/store doubleword. */
8274 addr = tcg_temp_new_i32();
8275 tcg_gen_movi_i32(addr, s->pc & ~3);
8277 addr = load_reg(s, rn);
8279 offset = (insn & 0xff) * 4;
8280 if ((insn & (1 << 23)) == 0)
8282 if (insn & (1 << 24)) {
8283 tcg_gen_addi_i32(addr, addr, offset);
8286 if (insn & (1 << 20)) {
8288 tmp = gen_ld32(addr, IS_USER(s));
8289 store_reg(s, rs, tmp);
8290 tcg_gen_addi_i32(addr, addr, 4);
8291 tmp = gen_ld32(addr, IS_USER(s));
8292 store_reg(s, rd, tmp);
8295 tmp = load_reg(s, rs);
8296 gen_st32(tmp, addr, IS_USER(s));
8297 tcg_gen_addi_i32(addr, addr, 4);
8298 tmp = load_reg(s, rd);
8299 gen_st32(tmp, addr, IS_USER(s));
8301 if (insn & (1 << 21)) {
8302 /* Base writeback. */
8305 tcg_gen_addi_i32(addr, addr, offset - 4);
8306 store_reg(s, rn, addr);
8308 tcg_temp_free_i32(addr);
8310 } else if ((insn & (1 << 23)) == 0) {
8311 /* Load/store exclusive word. */
8312 addr = tcg_temp_local_new();
8313 load_reg_var(s, addr, rn);
8314 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8315 if (insn & (1 << 20)) {
8316 gen_load_exclusive(s, rs, 15, addr, 2);
8318 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8320 tcg_temp_free(addr);
8321 } else if ((insn & (1 << 6)) == 0) {
8324 addr = tcg_temp_new_i32();
8325 tcg_gen_movi_i32(addr, s->pc);
8327 addr = load_reg(s, rn);
8329 tmp = load_reg(s, rm);
8330 tcg_gen_add_i32(addr, addr, tmp);
8331 if (insn & (1 << 4)) {
8333 tcg_gen_add_i32(addr, addr, tmp);
8334 tcg_temp_free_i32(tmp);
8335 tmp = gen_ld16u(addr, IS_USER(s));
8337 tcg_temp_free_i32(tmp);
8338 tmp = gen_ld8u(addr, IS_USER(s));
8340 tcg_temp_free_i32(addr);
8341 tcg_gen_shli_i32(tmp, tmp, 1);
8342 tcg_gen_addi_i32(tmp, tmp, s->pc);
8343 store_reg(s, 15, tmp);
8345 /* Load/store exclusive byte/halfword/doubleword. */
8347 op = (insn >> 4) & 0x3;
8351 addr = tcg_temp_local_new();
8352 load_reg_var(s, addr, rn);
8353 if (insn & (1 << 20)) {
8354 gen_load_exclusive(s, rs, rd, addr, op);
8356 gen_store_exclusive(s, rm, rs, rd, addr, op);
8358 tcg_temp_free(addr);
8361 /* Load/store multiple, RFE, SRS. */
8362 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8363 /* Not available in user mode. */
8366 if (insn & (1 << 20)) {
8368 addr = load_reg(s, rn);
8369 if ((insn & (1 << 24)) == 0)
8370 tcg_gen_addi_i32(addr, addr, -8);
8371 /* Load PC into tmp and CPSR into tmp2. */
8372 tmp = gen_ld32(addr, 0);
8373 tcg_gen_addi_i32(addr, addr, 4);
8374 tmp2 = gen_ld32(addr, 0);
8375 if (insn & (1 << 21)) {
8376 /* Base writeback. */
8377 if (insn & (1 << 24)) {
8378 tcg_gen_addi_i32(addr, addr, 4);
8380 tcg_gen_addi_i32(addr, addr, -4);
8382 store_reg(s, rn, addr);
8384 tcg_temp_free_i32(addr);
8386 gen_rfe(s, tmp, tmp2);
8390 addr = tcg_temp_new_i32();
8391 tmp = tcg_const_i32(op);
8392 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8393 tcg_temp_free_i32(tmp);
8394 if ((insn & (1 << 24)) == 0) {
8395 tcg_gen_addi_i32(addr, addr, -8);
8397 tmp = load_reg(s, 14);
8398 gen_st32(tmp, addr, 0);
8399 tcg_gen_addi_i32(addr, addr, 4);
8400 tmp = tcg_temp_new_i32();
8401 gen_helper_cpsr_read(tmp);
8402 gen_st32(tmp, addr, 0);
8403 if (insn & (1 << 21)) {
8404 if ((insn & (1 << 24)) == 0) {
8405 tcg_gen_addi_i32(addr, addr, -4);
8407 tcg_gen_addi_i32(addr, addr, 4);
8409 tmp = tcg_const_i32(op);
8410 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8411 tcg_temp_free_i32(tmp);
8413 tcg_temp_free_i32(addr);
8417 int i, loaded_base = 0;
8419 /* Load/store multiple. */
8420 addr = load_reg(s, rn);
8422 for (i = 0; i < 16; i++) {
8423 if (insn & (1 << i))
8426 if (insn & (1 << 24)) {
8427 tcg_gen_addi_i32(addr, addr, -offset);
8430 TCGV_UNUSED(loaded_var);
8431 for (i = 0; i < 16; i++) {
8432 if ((insn & (1 << i)) == 0)
8434 if (insn & (1 << 20)) {
8436 tmp = gen_ld32(addr, IS_USER(s));
8439 } else if (i == rn) {
8443 store_reg(s, i, tmp);
8447 tmp = load_reg(s, i);
8448 gen_st32(tmp, addr, IS_USER(s));
8450 tcg_gen_addi_i32(addr, addr, 4);
8453 store_reg(s, rn, loaded_var);
8455 if (insn & (1 << 21)) {
8456 /* Base register writeback. */
8457 if (insn & (1 << 24)) {
8458 tcg_gen_addi_i32(addr, addr, -offset);
8460 /* Fault if writeback register is in register list. */
8461 if (insn & (1 << rn))
8463 store_reg(s, rn, addr);
8465 tcg_temp_free_i32(addr);
8472 op = (insn >> 21) & 0xf;
8474 /* Halfword pack. */
8475 tmp = load_reg(s, rn);
8476 tmp2 = load_reg(s, rm);
8477 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8478 if (insn & (1 << 5)) {
8482 tcg_gen_sari_i32(tmp2, tmp2, shift);
8483 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8484 tcg_gen_ext16u_i32(tmp2, tmp2);
8488 tcg_gen_shli_i32(tmp2, tmp2, shift);
8489 tcg_gen_ext16u_i32(tmp, tmp);
8490 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8492 tcg_gen_or_i32(tmp, tmp, tmp2);
8493 tcg_temp_free_i32(tmp2);
8494 store_reg(s, rd, tmp);
8496 /* Data processing register constant shift. */
8498 tmp = tcg_temp_new_i32();
8499 tcg_gen_movi_i32(tmp, 0);
8501 tmp = load_reg(s, rn);
8503 tmp2 = load_reg(s, rm);
8505 shiftop = (insn >> 4) & 3;
8506 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8507 conds = (insn & (1 << 20)) != 0;
8508 logic_cc = (conds && thumb2_logic_op(op));
8509 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8510 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8512 tcg_temp_free_i32(tmp2);
8514 store_reg(s, rd, tmp);
8516 tcg_temp_free_i32(tmp);
8520 case 13: /* Misc data processing. */
8521 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8522 if (op < 4 && (insn & 0xf000) != 0xf000)
8525 case 0: /* Register controlled shift. */
8526 tmp = load_reg(s, rn);
8527 tmp2 = load_reg(s, rm);
8528 if ((insn & 0x70) != 0)
8530 op = (insn >> 21) & 3;
8531 logic_cc = (insn & (1 << 20)) != 0;
8532 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8535 store_reg_bx(env, s, rd, tmp);
8537 case 1: /* Sign/zero extend. */
8538 tmp = load_reg(s, rm);
8539 shift = (insn >> 4) & 3;
8540 /* ??? In many cases it's not necessary to do a
8541 rotate, a shift is sufficient. */
8543 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8544 op = (insn >> 20) & 7;
8546 case 0: gen_sxth(tmp); break;
8547 case 1: gen_uxth(tmp); break;
8548 case 2: gen_sxtb16(tmp); break;
8549 case 3: gen_uxtb16(tmp); break;
8550 case 4: gen_sxtb(tmp); break;
8551 case 5: gen_uxtb(tmp); break;
8552 default: goto illegal_op;
8555 tmp2 = load_reg(s, rn);
8556 if ((op >> 1) == 1) {
8557 gen_add16(tmp, tmp2);
8559 tcg_gen_add_i32(tmp, tmp, tmp2);
8560 tcg_temp_free_i32(tmp2);
8563 store_reg(s, rd, tmp);
8565 case 2: /* SIMD add/subtract. */
8566 op = (insn >> 20) & 7;
8567 shift = (insn >> 4) & 7;
8568 if ((op & 3) == 3 || (shift & 3) == 3)
8570 tmp = load_reg(s, rn);
8571 tmp2 = load_reg(s, rm);
8572 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8573 tcg_temp_free_i32(tmp2);
8574 store_reg(s, rd, tmp);
8576 case 3: /* Other data processing. */
8577 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8579 /* Saturating add/subtract. */
8580 tmp = load_reg(s, rn);
8581 tmp2 = load_reg(s, rm);
8583 gen_helper_double_saturate(tmp, tmp);
8585 gen_helper_sub_saturate(tmp, tmp2, tmp);
8587 gen_helper_add_saturate(tmp, tmp, tmp2);
8588 tcg_temp_free_i32(tmp2);
8590 tmp = load_reg(s, rn);
8592 case 0x0a: /* rbit */
8593 gen_helper_rbit(tmp, tmp);
8595 case 0x08: /* rev */
8596 tcg_gen_bswap32_i32(tmp, tmp);
8598 case 0x09: /* rev16 */
8601 case 0x0b: /* revsh */
8604 case 0x10: /* sel */
8605 tmp2 = load_reg(s, rm);
8606 tmp3 = tcg_temp_new_i32();
8607 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8608 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8609 tcg_temp_free_i32(tmp3);
8610 tcg_temp_free_i32(tmp2);
8612 case 0x18: /* clz */
8613 gen_helper_clz(tmp, tmp);
8619 store_reg(s, rd, tmp);
8621 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8622 op = (insn >> 4) & 0xf;
8623 tmp = load_reg(s, rn);
8624 tmp2 = load_reg(s, rm);
8625 switch ((insn >> 20) & 7) {
8626 case 0: /* 32 x 32 -> 32 */
8627 tcg_gen_mul_i32(tmp, tmp, tmp2);
8628 tcg_temp_free_i32(tmp2);
8630 tmp2 = load_reg(s, rs);
8632 tcg_gen_sub_i32(tmp, tmp2, tmp);
8634 tcg_gen_add_i32(tmp, tmp, tmp2);
8635 tcg_temp_free_i32(tmp2);
8638 case 1: /* 16 x 16 -> 32 */
8639 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8640 tcg_temp_free_i32(tmp2);
8642 tmp2 = load_reg(s, rs);
8643 gen_helper_add_setq(tmp, tmp, tmp2);
8644 tcg_temp_free_i32(tmp2);
8647 case 2: /* Dual multiply add. */
8648 case 4: /* Dual multiply subtract. */
8650 gen_swap_half(tmp2);
8651 gen_smul_dual(tmp, tmp2);
8652 if (insn & (1 << 22)) {
8653 /* This subtraction cannot overflow. */
8654 tcg_gen_sub_i32(tmp, tmp, tmp2);
8656 /* This addition cannot overflow 32 bits;
8657 * however it may overflow considered as a signed
8658 * operation, in which case we must set the Q flag.
8660 gen_helper_add_setq(tmp, tmp, tmp2);
8662 tcg_temp_free_i32(tmp2);
8665 tmp2 = load_reg(s, rs);
8666 gen_helper_add_setq(tmp, tmp, tmp2);
8667 tcg_temp_free_i32(tmp2);
8670 case 3: /* 32 * 16 -> 32msb */
8672 tcg_gen_sari_i32(tmp2, tmp2, 16);
8675 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8676 tcg_gen_shri_i64(tmp64, tmp64, 16);
8677 tmp = tcg_temp_new_i32();
8678 tcg_gen_trunc_i64_i32(tmp, tmp64);
8679 tcg_temp_free_i64(tmp64);
8682 tmp2 = load_reg(s, rs);
8683 gen_helper_add_setq(tmp, tmp, tmp2);
8684 tcg_temp_free_i32(tmp2);
8687 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8688 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8690 tmp = load_reg(s, rs);
8691 if (insn & (1 << 20)) {
8692 tmp64 = gen_addq_msw(tmp64, tmp);
8694 tmp64 = gen_subq_msw(tmp64, tmp);
8697 if (insn & (1 << 4)) {
8698 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8700 tcg_gen_shri_i64(tmp64, tmp64, 32);
8701 tmp = tcg_temp_new_i32();
8702 tcg_gen_trunc_i64_i32(tmp, tmp64);
8703 tcg_temp_free_i64(tmp64);
8705 case 7: /* Unsigned sum of absolute differences. */
8706 gen_helper_usad8(tmp, tmp, tmp2);
8707 tcg_temp_free_i32(tmp2);
8709 tmp2 = load_reg(s, rs);
8710 tcg_gen_add_i32(tmp, tmp, tmp2);
8711 tcg_temp_free_i32(tmp2);
8715 store_reg(s, rd, tmp);
8717 case 6: case 7: /* 64-bit multiply, Divide. */
8718 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8719 tmp = load_reg(s, rn);
8720 tmp2 = load_reg(s, rm);
8721 if ((op & 0x50) == 0x10) {
8723 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8727 gen_helper_udiv(tmp, tmp, tmp2);
8729 gen_helper_sdiv(tmp, tmp, tmp2);
8730 tcg_temp_free_i32(tmp2);
8731 store_reg(s, rd, tmp);
8732 } else if ((op & 0xe) == 0xc) {
8733 /* Dual multiply accumulate long. */
8735 gen_swap_half(tmp2);
8736 gen_smul_dual(tmp, tmp2);
8738 tcg_gen_sub_i32(tmp, tmp, tmp2);
8740 tcg_gen_add_i32(tmp, tmp, tmp2);
8742 tcg_temp_free_i32(tmp2);
8744 tmp64 = tcg_temp_new_i64();
8745 tcg_gen_ext_i32_i64(tmp64, tmp);
8746 tcg_temp_free_i32(tmp);
8747 gen_addq(s, tmp64, rs, rd);
8748 gen_storeq_reg(s, rs, rd, tmp64);
8749 tcg_temp_free_i64(tmp64);
8752 /* Unsigned 64-bit multiply */
8753 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8757 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8758 tcg_temp_free_i32(tmp2);
8759 tmp64 = tcg_temp_new_i64();
8760 tcg_gen_ext_i32_i64(tmp64, tmp);
8761 tcg_temp_free_i32(tmp);
8763 /* Signed 64-bit multiply */
8764 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8769 gen_addq_lo(s, tmp64, rs);
8770 gen_addq_lo(s, tmp64, rd);
8771 } else if (op & 0x40) {
8772 /* 64-bit accumulate. */
8773 gen_addq(s, tmp64, rs, rd);
8775 gen_storeq_reg(s, rs, rd, tmp64);
8776 tcg_temp_free_i64(tmp64);
8781 case 6: case 7: case 14: case 15:
8783 if (((insn >> 24) & 3) == 3) {
8784 /* Translate into the equivalent ARM encoding. */
8785 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8786 if (disas_neon_data_insn(env, s, insn))
8789 if (insn & (1 << 28))
8791 if (disas_coproc_insn (env, s, insn))
8795 case 8: case 9: case 10: case 11:
8796 if (insn & (1 << 15)) {
8797 /* Branches, misc control. */
8798 if (insn & 0x5000) {
8799 /* Unconditional branch. */
8800 /* signextend(hw1[10:0]) -> offset[:12]. */
8801 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8802 /* hw1[10:0] -> offset[11:1]. */
8803 offset |= (insn & 0x7ff) << 1;
8804 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8805 offset[24:22] already have the same value because of the
8806 sign extension above. */
8807 offset ^= ((~insn) & (1 << 13)) << 10;
8808 offset ^= ((~insn) & (1 << 11)) << 11;
8810 if (insn & (1 << 14)) {
8811 /* Branch and link. */
8812 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8816 if (insn & (1 << 12)) {
8821 offset &= ~(uint32_t)2;
8822 /* thumb2 bx, no need to check */
8823 gen_bx_im(s, offset);
8825 } else if (((insn >> 23) & 7) == 7) {
8827 if (insn & (1 << 13))
8830 if (insn & (1 << 26)) {
8831 /* Secure monitor call (v6Z) */
8832 goto illegal_op; /* not implemented. */
8834 op = (insn >> 20) & 7;
8836 case 0: /* msr cpsr. */
8838 tmp = load_reg(s, rn);
8839 addr = tcg_const_i32(insn & 0xff);
8840 gen_helper_v7m_msr(cpu_env, addr, tmp);
8841 tcg_temp_free_i32(addr);
8842 tcg_temp_free_i32(tmp);
8847 case 1: /* msr spsr. */
8850 tmp = load_reg(s, rn);
8852 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8856 case 2: /* cps, nop-hint. */
8857 if (((insn >> 8) & 7) == 0) {
8858 gen_nop_hint(s, insn & 0xff);
8860 /* Implemented as NOP in user mode. */
8865 if (insn & (1 << 10)) {
8866 if (insn & (1 << 7))
8868 if (insn & (1 << 6))
8870 if (insn & (1 << 5))
8872 if (insn & (1 << 9))
8873 imm = CPSR_A | CPSR_I | CPSR_F;
8875 if (insn & (1 << 8)) {
8877 imm |= (insn & 0x1f);
8880 gen_set_psr_im(s, offset, 0, imm);
8883 case 3: /* Special control operations. */
8885 op = (insn >> 4) & 0xf;
8893 /* These execute as NOPs. */
8900 /* Trivial implementation equivalent to bx. */
8901 tmp = load_reg(s, rn);
8904 case 5: /* Exception return. */
8908 if (rn != 14 || rd != 15) {
8911 tmp = load_reg(s, rn);
8912 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8913 gen_exception_return(s, tmp);
8915 case 6: /* mrs cpsr. */
8916 tmp = tcg_temp_new_i32();
8918 addr = tcg_const_i32(insn & 0xff);
8919 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8920 tcg_temp_free_i32(addr);
8922 gen_helper_cpsr_read(tmp);
8924 store_reg(s, rd, tmp);
8926 case 7: /* mrs spsr. */
8927 /* Not accessible in user mode. */
8928 if (IS_USER(s) || IS_M(env))
8930 tmp = load_cpu_field(spsr);
8931 store_reg(s, rd, tmp);
8936 /* Conditional branch. */
8937 op = (insn >> 22) & 0xf;
8938 /* Generate a conditional jump to next instruction. */
8939 s->condlabel = gen_new_label();
8940 gen_test_cc(op ^ 1, s->condlabel);
8943 /* offset[11:1] = insn[10:0] */
8944 offset = (insn & 0x7ff) << 1;
8945 /* offset[17:12] = insn[21:16]. */
8946 offset |= (insn & 0x003f0000) >> 4;
8947 /* offset[31:20] = insn[26]. */
8948 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8949 /* offset[18] = insn[13]. */
8950 offset |= (insn & (1 << 13)) << 5;
8951 /* offset[19] = insn[11]. */
8952 offset |= (insn & (1 << 11)) << 8;
8954 /* jump to the offset */
8955 gen_jmp(s, s->pc + offset);
8958 /* Data processing immediate. */
8959 if (insn & (1 << 25)) {
8960 if (insn & (1 << 24)) {
8961 if (insn & (1 << 20))
8963 /* Bitfield/Saturate. */
8964 op = (insn >> 21) & 7;
8966 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8968 tmp = tcg_temp_new_i32();
8969 tcg_gen_movi_i32(tmp, 0);
8971 tmp = load_reg(s, rn);
8974 case 2: /* Signed bitfield extract. */
8976 if (shift + imm > 32)
8979 gen_sbfx(tmp, shift, imm);
8981 case 6: /* Unsigned bitfield extract. */
8983 if (shift + imm > 32)
8986 gen_ubfx(tmp, shift, (1u << imm) - 1);
8988 case 3: /* Bitfield insert/clear. */
8991 imm = imm + 1 - shift;
8993 tmp2 = load_reg(s, rd);
8994 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8995 tcg_temp_free_i32(tmp2);
9000 default: /* Saturate. */
9003 tcg_gen_sari_i32(tmp, tmp, shift);
9005 tcg_gen_shli_i32(tmp, tmp, shift);
9007 tmp2 = tcg_const_i32(imm);
9010 if ((op & 1) && shift == 0)
9011 gen_helper_usat16(tmp, tmp, tmp2);
9013 gen_helper_usat(tmp, tmp, tmp2);
9016 if ((op & 1) && shift == 0)
9017 gen_helper_ssat16(tmp, tmp, tmp2);
9019 gen_helper_ssat(tmp, tmp, tmp2);
9021 tcg_temp_free_i32(tmp2);
9024 store_reg(s, rd, tmp);
9026 imm = ((insn & 0x04000000) >> 15)
9027 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9028 if (insn & (1 << 22)) {
9029 /* 16-bit immediate. */
9030 imm |= (insn >> 4) & 0xf000;
9031 if (insn & (1 << 23)) {
9033 tmp = load_reg(s, rd);
9034 tcg_gen_ext16u_i32(tmp, tmp);
9035 tcg_gen_ori_i32(tmp, tmp, imm << 16);
9038 tmp = tcg_temp_new_i32();
9039 tcg_gen_movi_i32(tmp, imm);
9042 /* Add/sub 12-bit immediate. */
9044 offset = s->pc & ~(uint32_t)3;
9045 if (insn & (1 << 23))
9049 tmp = tcg_temp_new_i32();
9050 tcg_gen_movi_i32(tmp, offset);
9052 tmp = load_reg(s, rn);
9053 if (insn & (1 << 23))
9054 tcg_gen_subi_i32(tmp, tmp, imm);
9056 tcg_gen_addi_i32(tmp, tmp, imm);
9059 store_reg(s, rd, tmp);
9062 int shifter_out = 0;
9063 /* modified 12-bit immediate. */
9064 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9065 imm = (insn & 0xff);
9068 /* Nothing to do. */
9070 case 1: /* 00XY00XY */
9073 case 2: /* XY00XY00 */
9077 case 3: /* XYXYXYXY */
9081 default: /* Rotated constant. */
9082 shift = (shift << 1) | (imm >> 7);
9084 imm = imm << (32 - shift);
9088 tmp2 = tcg_temp_new_i32();
9089 tcg_gen_movi_i32(tmp2, imm);
9090 rn = (insn >> 16) & 0xf;
9092 tmp = tcg_temp_new_i32();
9093 tcg_gen_movi_i32(tmp, 0);
9095 tmp = load_reg(s, rn);
9097 op = (insn >> 21) & 0xf;
9098 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
9099 shifter_out, tmp, tmp2))
9101 tcg_temp_free_i32(tmp2);
9102 rd = (insn >> 8) & 0xf;
9104 store_reg(s, rd, tmp);
9106 tcg_temp_free_i32(tmp);
9111 case 12: /* Load/store single data item. */
9116 if ((insn & 0x01100000) == 0x01000000) {
9117 if (disas_neon_ls_insn(env, s, insn))
9121 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9123 if (!(insn & (1 << 20))) {
9127 /* Byte or halfword load space with dest == r15 : memory hints.
9128 * Catch them early so we don't emit pointless addressing code.
9129 * This space is a mix of:
9130 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9131 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9133 * unallocated hints, which must be treated as NOPs
9134 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9135 * which is easiest for the decoding logic
9136 * Some space which must UNDEF
9138 int op1 = (insn >> 23) & 3;
9139 int op2 = (insn >> 6) & 0x3f;
9144 /* UNPREDICTABLE, unallocated hint or
9145 * PLD/PLDW/PLI (literal)
9150 return 0; /* PLD/PLDW/PLI or unallocated hint */
9152 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
9153 return 0; /* PLD/PLDW/PLI or unallocated hint */
9155 /* UNDEF space, or an UNPREDICTABLE */
9161 addr = tcg_temp_new_i32();
9163 /* s->pc has already been incremented by 4. */
9164 imm = s->pc & 0xfffffffc;
9165 if (insn & (1 << 23))
9166 imm += insn & 0xfff;
9168 imm -= insn & 0xfff;
9169 tcg_gen_movi_i32(addr, imm);
9171 addr = load_reg(s, rn);
9172 if (insn & (1 << 23)) {
9173 /* Positive offset. */
9175 tcg_gen_addi_i32(addr, addr, imm);
9178 switch ((insn >> 8) & 0xf) {
9179 case 0x0: /* Shifted Register. */
9180 shift = (insn >> 4) & 0xf;
9182 tcg_temp_free_i32(addr);
9185 tmp = load_reg(s, rm);
9187 tcg_gen_shli_i32(tmp, tmp, shift);
9188 tcg_gen_add_i32(addr, addr, tmp);
9189 tcg_temp_free_i32(tmp);
9191 case 0xc: /* Negative offset. */
9192 tcg_gen_addi_i32(addr, addr, -imm);
9194 case 0xe: /* User privilege. */
9195 tcg_gen_addi_i32(addr, addr, imm);
9198 case 0x9: /* Post-decrement. */
9201 case 0xb: /* Post-increment. */
9205 case 0xd: /* Pre-decrement. */
9208 case 0xf: /* Pre-increment. */
9209 tcg_gen_addi_i32(addr, addr, imm);
9213 tcg_temp_free_i32(addr);
9218 if (insn & (1 << 20)) {
9221 case 0: tmp = gen_ld8u(addr, user); break;
9222 case 4: tmp = gen_ld8s(addr, user); break;
9223 case 1: tmp = gen_ld16u(addr, user); break;
9224 case 5: tmp = gen_ld16s(addr, user); break;
9225 case 2: tmp = gen_ld32(addr, user); break;
9227 tcg_temp_free_i32(addr);
9233 store_reg(s, rs, tmp);
9237 tmp = load_reg(s, rs);
9239 case 0: gen_st8(tmp, addr, user); break;
9240 case 1: gen_st16(tmp, addr, user); break;
9241 case 2: gen_st32(tmp, addr, user); break;
9243 tcg_temp_free_i32(addr);
9248 tcg_gen_addi_i32(addr, addr, imm);
9250 store_reg(s, rn, addr);
9252 tcg_temp_free_i32(addr);
9264 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
9266 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9273 if (s->condexec_mask) {
9274 cond = s->condexec_cond;
9275 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9276 s->condlabel = gen_new_label();
9277 gen_test_cc(cond ^ 1, s->condlabel);
9282 insn = arm_lduw_code(s->pc, s->bswap_code);
9285 switch (insn >> 12) {
9289 op = (insn >> 11) & 3;
9292 rn = (insn >> 3) & 7;
9293 tmp = load_reg(s, rn);
9294 if (insn & (1 << 10)) {
9296 tmp2 = tcg_temp_new_i32();
9297 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9300 rm = (insn >> 6) & 7;
9301 tmp2 = load_reg(s, rm);
9303 if (insn & (1 << 9)) {
9304 if (s->condexec_mask)
9305 tcg_gen_sub_i32(tmp, tmp, tmp2);
9307 gen_helper_sub_cc(tmp, tmp, tmp2);
9309 if (s->condexec_mask)
9310 tcg_gen_add_i32(tmp, tmp, tmp2);
9312 gen_helper_add_cc(tmp, tmp, tmp2);
9314 tcg_temp_free_i32(tmp2);
9315 store_reg(s, rd, tmp);
9317 /* shift immediate */
9318 rm = (insn >> 3) & 7;
9319 shift = (insn >> 6) & 0x1f;
9320 tmp = load_reg(s, rm);
9321 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9322 if (!s->condexec_mask)
9324 store_reg(s, rd, tmp);
9328 /* arithmetic large immediate */
9329 op = (insn >> 11) & 3;
9330 rd = (insn >> 8) & 0x7;
9331 if (op == 0) { /* mov */
9332 tmp = tcg_temp_new_i32();
9333 tcg_gen_movi_i32(tmp, insn & 0xff);
9334 if (!s->condexec_mask)
9336 store_reg(s, rd, tmp);
9338 tmp = load_reg(s, rd);
9339 tmp2 = tcg_temp_new_i32();
9340 tcg_gen_movi_i32(tmp2, insn & 0xff);
9343 gen_helper_sub_cc(tmp, tmp, tmp2);
9344 tcg_temp_free_i32(tmp);
9345 tcg_temp_free_i32(tmp2);
9348 if (s->condexec_mask)
9349 tcg_gen_add_i32(tmp, tmp, tmp2);
9351 gen_helper_add_cc(tmp, tmp, tmp2);
9352 tcg_temp_free_i32(tmp2);
9353 store_reg(s, rd, tmp);
9356 if (s->condexec_mask)
9357 tcg_gen_sub_i32(tmp, tmp, tmp2);
9359 gen_helper_sub_cc(tmp, tmp, tmp2);
9360 tcg_temp_free_i32(tmp2);
9361 store_reg(s, rd, tmp);
9367 if (insn & (1 << 11)) {
9368 rd = (insn >> 8) & 7;
9369 /* load pc-relative. Bit 1 of PC is ignored. */
9370 val = s->pc + 2 + ((insn & 0xff) * 4);
9371 val &= ~(uint32_t)2;
9372 addr = tcg_temp_new_i32();
9373 tcg_gen_movi_i32(addr, val);
9374 tmp = gen_ld32(addr, IS_USER(s));
9375 tcg_temp_free_i32(addr);
9376 store_reg(s, rd, tmp);
9379 if (insn & (1 << 10)) {
9380 /* data processing extended or blx */
9381 rd = (insn & 7) | ((insn >> 4) & 8);
9382 rm = (insn >> 3) & 0xf;
9383 op = (insn >> 8) & 3;
9386 tmp = load_reg(s, rd);
9387 tmp2 = load_reg(s, rm);
9388 tcg_gen_add_i32(tmp, tmp, tmp2);
9389 tcg_temp_free_i32(tmp2);
9390 store_reg(s, rd, tmp);
9393 tmp = load_reg(s, rd);
9394 tmp2 = load_reg(s, rm);
9395 gen_helper_sub_cc(tmp, tmp, tmp2);
9396 tcg_temp_free_i32(tmp2);
9397 tcg_temp_free_i32(tmp);
9399 case 2: /* mov/cpy */
9400 tmp = load_reg(s, rm);
9401 store_reg(s, rd, tmp);
9403 case 3:/* branch [and link] exchange thumb register */
9404 tmp = load_reg(s, rm);
9405 if (insn & (1 << 7)) {
9407 val = (uint32_t)s->pc | 1;
9408 tmp2 = tcg_temp_new_i32();
9409 tcg_gen_movi_i32(tmp2, val);
9410 store_reg(s, 14, tmp2);
9412 /* already thumb, no need to check */
9419 /* data processing register */
9421 rm = (insn >> 3) & 7;
9422 op = (insn >> 6) & 0xf;
9423 if (op == 2 || op == 3 || op == 4 || op == 7) {
9424 /* the shift/rotate ops want the operands backwards */
9433 if (op == 9) { /* neg */
9434 tmp = tcg_temp_new_i32();
9435 tcg_gen_movi_i32(tmp, 0);
9436 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9437 tmp = load_reg(s, rd);
9442 tmp2 = load_reg(s, rm);
9445 tcg_gen_and_i32(tmp, tmp, tmp2);
9446 if (!s->condexec_mask)
9450 tcg_gen_xor_i32(tmp, tmp, tmp2);
9451 if (!s->condexec_mask)
9455 if (s->condexec_mask) {
9456 gen_helper_shl(tmp2, tmp2, tmp);
9458 gen_helper_shl_cc(tmp2, tmp2, tmp);
9463 if (s->condexec_mask) {
9464 gen_helper_shr(tmp2, tmp2, tmp);
9466 gen_helper_shr_cc(tmp2, tmp2, tmp);
9471 if (s->condexec_mask) {
9472 gen_helper_sar(tmp2, tmp2, tmp);
9474 gen_helper_sar_cc(tmp2, tmp2, tmp);
9479 if (s->condexec_mask)
9482 gen_helper_adc_cc(tmp, tmp, tmp2);
9485 if (s->condexec_mask)
9486 gen_sub_carry(tmp, tmp, tmp2);
9488 gen_helper_sbc_cc(tmp, tmp, tmp2);
9491 if (s->condexec_mask) {
9492 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9493 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9495 gen_helper_ror_cc(tmp2, tmp2, tmp);
9500 tcg_gen_and_i32(tmp, tmp, tmp2);
9505 if (s->condexec_mask)
9506 tcg_gen_neg_i32(tmp, tmp2);
9508 gen_helper_sub_cc(tmp, tmp, tmp2);
9511 gen_helper_sub_cc(tmp, tmp, tmp2);
9515 gen_helper_add_cc(tmp, tmp, tmp2);
9519 tcg_gen_or_i32(tmp, tmp, tmp2);
9520 if (!s->condexec_mask)
9524 tcg_gen_mul_i32(tmp, tmp, tmp2);
9525 if (!s->condexec_mask)
9529 tcg_gen_andc_i32(tmp, tmp, tmp2);
9530 if (!s->condexec_mask)
9534 tcg_gen_not_i32(tmp2, tmp2);
9535 if (!s->condexec_mask)
9543 store_reg(s, rm, tmp2);
9545 tcg_temp_free_i32(tmp);
9547 store_reg(s, rd, tmp);
9548 tcg_temp_free_i32(tmp2);
9551 tcg_temp_free_i32(tmp);
9552 tcg_temp_free_i32(tmp2);
9557 /* load/store register offset. */
9559 rn = (insn >> 3) & 7;
9560 rm = (insn >> 6) & 7;
9561 op = (insn >> 9) & 7;
9562 addr = load_reg(s, rn);
9563 tmp = load_reg(s, rm);
9564 tcg_gen_add_i32(addr, addr, tmp);
9565 tcg_temp_free_i32(tmp);
9567 if (op < 3) /* store */
9568 tmp = load_reg(s, rd);
9572 gen_st32(tmp, addr, IS_USER(s));
9575 gen_st16(tmp, addr, IS_USER(s));
9578 gen_st8(tmp, addr, IS_USER(s));
9581 tmp = gen_ld8s(addr, IS_USER(s));
9584 tmp = gen_ld32(addr, IS_USER(s));
9587 tmp = gen_ld16u(addr, IS_USER(s));
9590 tmp = gen_ld8u(addr, IS_USER(s));
9593 tmp = gen_ld16s(addr, IS_USER(s));
9596 if (op >= 3) /* load */
9597 store_reg(s, rd, tmp);
9598 tcg_temp_free_i32(addr);
9602 /* load/store word immediate offset */
9604 rn = (insn >> 3) & 7;
9605 addr = load_reg(s, rn);
9606 val = (insn >> 4) & 0x7c;
9607 tcg_gen_addi_i32(addr, addr, val);
9609 if (insn & (1 << 11)) {
9611 tmp = gen_ld32(addr, IS_USER(s));
9612 store_reg(s, rd, tmp);
9615 tmp = load_reg(s, rd);
9616 gen_st32(tmp, addr, IS_USER(s));
9618 tcg_temp_free_i32(addr);
9622 /* load/store byte immediate offset */
9624 rn = (insn >> 3) & 7;
9625 addr = load_reg(s, rn);
9626 val = (insn >> 6) & 0x1f;
9627 tcg_gen_addi_i32(addr, addr, val);
9629 if (insn & (1 << 11)) {
9631 tmp = gen_ld8u(addr, IS_USER(s));
9632 store_reg(s, rd, tmp);
9635 tmp = load_reg(s, rd);
9636 gen_st8(tmp, addr, IS_USER(s));
9638 tcg_temp_free_i32(addr);
9642 /* load/store halfword immediate offset */
9644 rn = (insn >> 3) & 7;
9645 addr = load_reg(s, rn);
9646 val = (insn >> 5) & 0x3e;
9647 tcg_gen_addi_i32(addr, addr, val);
9649 if (insn & (1 << 11)) {
9651 tmp = gen_ld16u(addr, IS_USER(s));
9652 store_reg(s, rd, tmp);
9655 tmp = load_reg(s, rd);
9656 gen_st16(tmp, addr, IS_USER(s));
9658 tcg_temp_free_i32(addr);
9662 /* load/store from stack */
9663 rd = (insn >> 8) & 7;
9664 addr = load_reg(s, 13);
9665 val = (insn & 0xff) * 4;
9666 tcg_gen_addi_i32(addr, addr, val);
9668 if (insn & (1 << 11)) {
9670 tmp = gen_ld32(addr, IS_USER(s));
9671 store_reg(s, rd, tmp);
9674 tmp = load_reg(s, rd);
9675 gen_st32(tmp, addr, IS_USER(s));
9677 tcg_temp_free_i32(addr);
9681 /* add to high reg */
9682 rd = (insn >> 8) & 7;
9683 if (insn & (1 << 11)) {
9685 tmp = load_reg(s, 13);
9687 /* PC. bit 1 is ignored. */
9688 tmp = tcg_temp_new_i32();
9689 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9691 val = (insn & 0xff) * 4;
9692 tcg_gen_addi_i32(tmp, tmp, val);
9693 store_reg(s, rd, tmp);
9698 op = (insn >> 8) & 0xf;
9701 /* adjust stack pointer */
9702 tmp = load_reg(s, 13);
9703 val = (insn & 0x7f) * 4;
9704 if (insn & (1 << 7))
9705 val = -(int32_t)val;
9706 tcg_gen_addi_i32(tmp, tmp, val);
9707 store_reg(s, 13, tmp);
9710 case 2: /* sign/zero extend. */
9713 rm = (insn >> 3) & 7;
9714 tmp = load_reg(s, rm);
9715 switch ((insn >> 6) & 3) {
9716 case 0: gen_sxth(tmp); break;
9717 case 1: gen_sxtb(tmp); break;
9718 case 2: gen_uxth(tmp); break;
9719 case 3: gen_uxtb(tmp); break;
9721 store_reg(s, rd, tmp);
9723 case 4: case 5: case 0xc: case 0xd:
9725 addr = load_reg(s, 13);
9726 if (insn & (1 << 8))
9730 for (i = 0; i < 8; i++) {
9731 if (insn & (1 << i))
9734 if ((insn & (1 << 11)) == 0) {
9735 tcg_gen_addi_i32(addr, addr, -offset);
9737 for (i = 0; i < 8; i++) {
9738 if (insn & (1 << i)) {
9739 if (insn & (1 << 11)) {
9741 tmp = gen_ld32(addr, IS_USER(s));
9742 store_reg(s, i, tmp);
9745 tmp = load_reg(s, i);
9746 gen_st32(tmp, addr, IS_USER(s));
9748 /* advance to the next address. */
9749 tcg_gen_addi_i32(addr, addr, 4);
9753 if (insn & (1 << 8)) {
9754 if (insn & (1 << 11)) {
9756 tmp = gen_ld32(addr, IS_USER(s));
9757 /* don't set the pc until the rest of the instruction
9761 tmp = load_reg(s, 14);
9762 gen_st32(tmp, addr, IS_USER(s));
9764 tcg_gen_addi_i32(addr, addr, 4);
9766 if ((insn & (1 << 11)) == 0) {
9767 tcg_gen_addi_i32(addr, addr, -offset);
9769 /* write back the new stack pointer */
9770 store_reg(s, 13, addr);
9771 /* set the new PC value */
9772 if ((insn & 0x0900) == 0x0900) {
9773 store_reg_from_load(env, s, 15, tmp);
9777 case 1: case 3: case 9: case 11: /* czb */
9779 tmp = load_reg(s, rm);
9780 s->condlabel = gen_new_label();
9782 if (insn & (1 << 11))
9783 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9785 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9786 tcg_temp_free_i32(tmp);
9787 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9788 val = (uint32_t)s->pc + 2;
9793 case 15: /* IT, nop-hint. */
9794 if ((insn & 0xf) == 0) {
9795 gen_nop_hint(s, (insn >> 4) & 0xf);
9799 s->condexec_cond = (insn >> 4) & 0xe;
9800 s->condexec_mask = insn & 0x1f;
9801 /* No actual code generated for this insn, just setup state. */
9804 case 0xe: /* bkpt */
9806 gen_exception_insn(s, 2, EXCP_BKPT);
9811 rn = (insn >> 3) & 0x7;
9813 tmp = load_reg(s, rn);
9814 switch ((insn >> 6) & 3) {
9815 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9816 case 1: gen_rev16(tmp); break;
9817 case 3: gen_revsh(tmp); break;
9818 default: goto illegal_op;
9820 store_reg(s, rd, tmp);
9824 switch ((insn >> 5) & 7) {
9828 if (((insn >> 3) & 1) != s->bswap_code) {
9829 /* Dynamic endianness switching not implemented. */
9840 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9843 addr = tcg_const_i32(19);
9844 gen_helper_v7m_msr(cpu_env, addr, tmp);
9845 tcg_temp_free_i32(addr);
9849 addr = tcg_const_i32(16);
9850 gen_helper_v7m_msr(cpu_env, addr, tmp);
9851 tcg_temp_free_i32(addr);
9853 tcg_temp_free_i32(tmp);
9856 if (insn & (1 << 4)) {
9857 shift = CPSR_A | CPSR_I | CPSR_F;
9861 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9876 /* load/store multiple */
9878 TCGV_UNUSED(loaded_var);
9879 rn = (insn >> 8) & 0x7;
9880 addr = load_reg(s, rn);
9881 for (i = 0; i < 8; i++) {
9882 if (insn & (1 << i)) {
9883 if (insn & (1 << 11)) {
9885 tmp = gen_ld32(addr, IS_USER(s));
9889 store_reg(s, i, tmp);
9893 tmp = load_reg(s, i);
9894 gen_st32(tmp, addr, IS_USER(s));
9896 /* advance to the next address */
9897 tcg_gen_addi_i32(addr, addr, 4);
9900 if ((insn & (1 << rn)) == 0) {
9901 /* base reg not in list: base register writeback */
9902 store_reg(s, rn, addr);
9904 /* base reg in list: if load, complete it now */
9905 if (insn & (1 << 11)) {
9906 store_reg(s, rn, loaded_var);
9908 tcg_temp_free_i32(addr);
9913 /* conditional branch or swi */
9914 cond = (insn >> 8) & 0xf;
9920 gen_set_pc_im(s->pc);
9921 s->is_jmp = DISAS_SWI;
9924 /* generate a conditional jump to next instruction */
9925 s->condlabel = gen_new_label();
9926 gen_test_cc(cond ^ 1, s->condlabel);
9929 /* jump to the offset */
9930 val = (uint32_t)s->pc + 2;
9931 offset = ((int32_t)insn << 24) >> 24;
9937 if (insn & (1 << 11)) {
9938 if (disas_thumb2_insn(env, s, insn))
9942 /* unconditional branch */
9943 val = (uint32_t)s->pc;
9944 offset = ((int32_t)insn << 21) >> 21;
9945 val += (offset << 1) + 2;
9950 if (disas_thumb2_insn(env, s, insn))
9956 gen_exception_insn(s, 4, EXCP_UDEF);
9960 gen_exception_insn(s, 2, EXCP_UDEF);
9963 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9964 basic block 'tb'. If search_pc is TRUE, also generate PC
9965 information for each intermediate instruction. */
9966 static inline void gen_intermediate_code_internal(CPUARMState *env,
9967 TranslationBlock *tb,
9970 DisasContext dc1, *dc = &dc1;
9972 uint16_t *gen_opc_end;
9974 target_ulong pc_start;
9975 uint32_t next_page_start;
9979 /* generate intermediate code */
9984 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9986 dc->is_jmp = DISAS_NEXT;
9988 dc->singlestep_enabled = env->singlestep_enabled;
9990 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9991 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9992 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9993 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9994 #if !defined(CONFIG_USER_ONLY)
9995 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9997 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9998 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9999 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
10000 cpu_F0s = tcg_temp_new_i32();
10001 cpu_F1s = tcg_temp_new_i32();
10002 cpu_F0d = tcg_temp_new_i64();
10003 cpu_F1d = tcg_temp_new_i64();
10006 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
10007 cpu_M0 = tcg_temp_new_i64();
10008 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
10011 max_insns = tb->cflags & CF_COUNT_MASK;
10012 if (max_insns == 0)
10013 max_insns = CF_COUNT_MASK;
10015 gen_icount_start();
10017 tcg_clear_temp_count();
10019 /* A note on handling of the condexec (IT) bits:
10021 * We want to avoid the overhead of having to write the updated condexec
10022 * bits back to the CPUARMState for every instruction in an IT block. So:
10023 * (1) if the condexec bits are not already zero then we write
10024 * zero back into the CPUARMState now. This avoids complications trying
10025 * to do it at the end of the block. (For example if we don't do this
10026 * it's hard to identify whether we can safely skip writing condexec
10027 * at the end of the TB, which we definitely want to do for the case
10028 * where a TB doesn't do anything with the IT state at all.)
10029 * (2) if we are going to leave the TB then we call gen_set_condexec()
10030 * which will write the correct value into CPUARMState if zero is wrong.
10031 * This is done both for leaving the TB at the end, and for leaving
10032 * it because of an exception we know will happen, which is done in
10033 * gen_exception_insn(). The latter is necessary because we need to
10034 * leave the TB with the PC/IT state just prior to execution of the
10035 * instruction which caused the exception.
10036 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
10037 * then the CPUARMState will be wrong and we need to reset it.
10038 * This is handled in the same way as restoration of the
10039 * PC in these situations: we will be called again with search_pc=1
10040 * and generate a mapping of the condexec bits for each PC in
10041 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10042 * this to restore the condexec bits.
10044 * Note that there are no instructions which can read the condexec
10045 * bits, and none which can write non-static values to them, so
10046 * we don't need to care about whether CPUARMState is correct in the
10050 /* Reset the conditional execution bits immediately. This avoids
10051 complications trying to do it at the end of the block. */
10052 if (dc->condexec_mask || dc->condexec_cond)
10054 TCGv tmp = tcg_temp_new_i32();
10055 tcg_gen_movi_i32(tmp, 0);
10056 store_cpu_field(tmp, condexec_bits);
10059 #ifdef CONFIG_USER_ONLY
10060 /* Intercept jump to the magic kernel page. */
10061 if (dc->pc >= 0xffff0000) {
10062 /* We always get here via a jump, so know we are not in a
10063 conditional execution block. */
10064 gen_exception(EXCP_KERNEL_TRAP);
10065 dc->is_jmp = DISAS_UPDATE;
10069 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10070 /* We always get here via a jump, so know we are not in a
10071 conditional execution block. */
10072 gen_exception(EXCP_EXCEPTION_EXIT);
10073 dc->is_jmp = DISAS_UPDATE;
10078 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
10079 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
10080 if (bp->pc == dc->pc) {
10081 gen_exception_insn(dc, 0, EXCP_DEBUG);
10082 /* Advance PC so that clearing the breakpoint will
10083 invalidate this TB. */
10085 goto done_generating;
10091 j = gen_opc_ptr - gen_opc_buf;
10095 gen_opc_instr_start[lj++] = 0;
10097 gen_opc_pc[lj] = dc->pc;
10098 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
10099 gen_opc_instr_start[lj] = 1;
10100 gen_opc_icount[lj] = num_insns;
10103 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10106 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
10107 tcg_gen_debug_insn_start(dc->pc);
10111 disas_thumb_insn(env, dc);
10112 if (dc->condexec_mask) {
10113 dc->condexec_cond = (dc->condexec_cond & 0xe)
10114 | ((dc->condexec_mask >> 4) & 1);
10115 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10116 if (dc->condexec_mask == 0) {
10117 dc->condexec_cond = 0;
10121 disas_arm_insn(env, dc);
10124 if (dc->condjmp && !dc->is_jmp) {
10125 gen_set_label(dc->condlabel);
10129 if (tcg_check_temp_count()) {
10130 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
10133 /* Translation stops when a conditional branch is encountered.
10134 * Otherwise the subsequent code could get translated several times.
10135 * Also stop translation when a page boundary is reached. This
10136 * ensures prefetch aborts occur at the right place. */
10138 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
10139 !env->singlestep_enabled &&
10141 dc->pc < next_page_start &&
10142 num_insns < max_insns);
10144 if (tb->cflags & CF_LAST_IO) {
10146 /* FIXME: This can theoretically happen with self-modifying
10148 cpu_abort(env, "IO on conditional branch instruction");
10153 /* At this stage dc->condjmp will only be set when the skipped
10154 instruction was a conditional branch or trap, and the PC has
10155 already been written. */
10156 if (unlikely(env->singlestep_enabled)) {
10157 /* Make sure the pc is updated, and raise a debug exception. */
10159 gen_set_condexec(dc);
10160 if (dc->is_jmp == DISAS_SWI) {
10161 gen_exception(EXCP_SWI);
10163 gen_exception(EXCP_DEBUG);
10165 gen_set_label(dc->condlabel);
10167 if (dc->condjmp || !dc->is_jmp) {
10168 gen_set_pc_im(dc->pc);
10171 gen_set_condexec(dc);
10172 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
10173 gen_exception(EXCP_SWI);
10175 /* FIXME: Single stepping a WFI insn will not halt
10177 gen_exception(EXCP_DEBUG);
10180 /* While branches must always occur at the end of an IT block,
10181 there are a few other things that can cause us to terminate
10182 the TB in the middel of an IT block:
10183 - Exception generating instructions (bkpt, swi, undefined).
10185 - Hardware watchpoints.
10186 Hardware breakpoints have already been handled and skip this code.
10188 gen_set_condexec(dc);
10189 switch(dc->is_jmp) {
10191 gen_goto_tb(dc, 1, dc->pc);
10196 /* indicate that the hash table must be used to find the next TB */
10197 tcg_gen_exit_tb(0);
10199 case DISAS_TB_JUMP:
10200 /* nothing more to generate */
10206 gen_exception(EXCP_SWI);
10210 gen_set_label(dc->condlabel);
10211 gen_set_condexec(dc);
10212 gen_goto_tb(dc, 1, dc->pc);
10218 gen_icount_end(tb, num_insns);
10219 *gen_opc_ptr = INDEX_op_end;
10222 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
10223 qemu_log("----------------\n");
10224 qemu_log("IN: %s\n", lookup_symbol(pc_start));
10225 log_target_disas(pc_start, dc->pc - pc_start,
10226 dc->thumb | (dc->bswap_code << 1));
10231 j = gen_opc_ptr - gen_opc_buf;
10234 gen_opc_instr_start[lj++] = 0;
10236 tb->size = dc->pc - pc_start;
10237 tb->icount = num_insns;
10241 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
10243 gen_intermediate_code_internal(env, tb, 0);
10246 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
10248 gen_intermediate_code_internal(env, tb, 1);
10251 static const char *cpu_mode_names[16] = {
10252 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10253 "???", "???", "???", "und", "???", "???", "???", "sys"
10256 void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
10266 /* ??? This assumes float64 and double have the same layout.
10267 Oh well, it's only debug dumps. */
10275 for(i=0;i<16;i++) {
10276 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10278 cpu_fprintf(f, "\n");
10280 cpu_fprintf(f, " ");
10282 psr = cpsr_read(env);
10283 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10285 psr & (1 << 31) ? 'N' : '-',
10286 psr & (1 << 30) ? 'Z' : '-',
10287 psr & (1 << 29) ? 'C' : '-',
10288 psr & (1 << 28) ? 'V' : '-',
10289 psr & CPSR_T ? 'T' : 'A',
10290 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10293 for (i = 0; i < 16; i++) {
10294 d.d = env->vfp.regs[i];
10298 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
10299 i * 2, (int)s0.i, s0.s,
10300 i * 2 + 1, (int)s1.i, s1.s,
10301 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
10304 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10308 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
10310 env->regs[15] = gen_opc_pc[pc_pos];
10311 env->condexec_bits = gen_opc_condexec_bits[pc_pos];