4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
43 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
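/* Illustrative usage note (not from the original source): decode paths guard
   optional instructions with this macro, e.g. ARCH(6T2); expands to a jump to
   the illegal_op label unless the corresponding feature bit is set for the
   current CPU. */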
45 /* internal defines */
46 typedef struct DisasContext {
49 /* Nonzero if this instruction has been conditionally skipped. */
51 /* The label that will be jumped to when the instruction is skipped. */
/* Thumb-2 conditional execution bits.  */
56 struct TranslationBlock *tb;
57 int singlestep_enabled;
60 #if !defined(CONFIG_USER_ONLY)
65 #if defined(CONFIG_USER_ONLY)
68 #define IS_USER(s) (s->user)
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
77 /* We reuse the same 64-bit temporaries for efficiency. */
78 static TCGv cpu_V0, cpu_V1, cpu_M0;
80 /* FIXME: These should be removed. */
82 static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
84 #define ICOUNT_TEMP cpu_T[0]
85 #include "gen-icount.h"
87 /* initialize TCG globals. */
88 void arm_translate_init(void)
90 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
92 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
93 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
96 /* The code generator doesn't like lots of temporaries, so maintain our own
97 cache for reuse within a function. */
100 static TCGv temps[MAX_TEMPS];
102 /* Allocate a temporary variable. */
103 static TCGv new_tmp(void)
106 if (num_temps == MAX_TEMPS)
109 if (GET_TCGV(temps[num_temps]))
110 return temps[num_temps++];
112 tmp = tcg_temp_new(TCG_TYPE_I32);
113 temps[num_temps++] = tmp;
117 /* Release a temporary variable. */
118 static void dead_tmp(TCGv tmp)
123 if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
126 /* Shuffle this temp to the last slot. */
127 while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
129 while (i < num_temps) {
130 temps[i] = temps[i + 1];
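/* Illustrative sketch (not part of the original file): translation code
   typically brackets each intermediate value with this allocate/release pair
   so the same TCG temporaries are recycled within one translated function:

       TCGv tmp = new_tmp();        allocate (or reuse) an i32 temporary
       tcg_gen_movi_i32(tmp, 0);    ... emit ops producing and consuming tmp ...
       dead_tmp(tmp);               return it to the cache
 */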
136 static inline TCGv load_cpu_offset(int offset)
138 TCGv tmp = new_tmp();
139 tcg_gen_ld_i32(tmp, cpu_env, offset);
143 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
145 static inline void store_cpu_offset(TCGv var, int offset)
147 tcg_gen_st_i32(var, cpu_env, offset);
151 #define store_cpu_field(var, name) \
152 store_cpu_offset(var, offsetof(CPUState, name))
154 /* Set a variable to the value of a CPU register. */
155 static void load_reg_var(DisasContext *s, TCGv var, int reg)
/* Normally, since we have already updated s->pc, we need only add one insn.  */
161 addr = (long)s->pc + 2;
163 addr = (long)s->pc + 4;
164 tcg_gen_movi_i32(var, addr);
166 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
170 /* Create a new temporary and set it to the value of a CPU register. */
171 static inline TCGv load_reg(DisasContext *s, int reg)
173 TCGv tmp = new_tmp();
174 load_reg_var(s, tmp, reg);
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
180 static void store_reg(DisasContext *s, int reg, TCGv var)
183 tcg_gen_andi_i32(var, var, ~1);
184 s->is_jmp = DISAS_JUMP;
186 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
191 /* Basic operations. */
192 #define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
193 #define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
194 #define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
195 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
197 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
198 #define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
199 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
200 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
202 #define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
203 #define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
204 #define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
205 #define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
206 #define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
207 #define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
209 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
210 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
211 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
212 #define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
213 #define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
214 #define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
215 #define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
217 #define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
218 #define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
219 #define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
220 #define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
221 #define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
223 /* Value extensions. */
224 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
225 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
226 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
227 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
229 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
230 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
232 #define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
234 #define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
235 /* Set NZCV flags from the high 4 bits of var. */
236 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
238 static void gen_exception(int excp)
240 TCGv tmp = new_tmp();
241 tcg_gen_movi_i32(tmp, excp);
242 gen_helper_exception(tmp);
246 static void gen_smul_dual(TCGv a, TCGv b)
248 TCGv tmp1 = new_tmp();
249 TCGv tmp2 = new_tmp();
250 tcg_gen_ext16s_i32(tmp1, a);
251 tcg_gen_ext16s_i32(tmp2, b);
252 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
254 tcg_gen_sari_i32(a, a, 16);
255 tcg_gen_sari_i32(b, b, 16);
256 tcg_gen_mul_i32(b, b, a);
257 tcg_gen_mov_i32(a, tmp1);
261 /* Byteswap each halfword. */
262 static void gen_rev16(TCGv var)
264 TCGv tmp = new_tmp();
265 tcg_gen_shri_i32(tmp, var, 8);
266 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
267 tcg_gen_shli_i32(var, var, 8);
268 tcg_gen_andi_i32(var, var, 0xff00ff00);
269 tcg_gen_or_i32(var, var, tmp);
273 /* Byteswap low halfword and sign extend. */
274 static void gen_revsh(TCGv var)
276 TCGv tmp = new_tmp();
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
/* Sign-extend the low byte first, then shift it into bits 15..8, so the
   result ends up correctly sign-extended.  */
tcg_gen_ext8s_i32(var, var);
tcg_gen_shli_i32(var, var, 8);
281 tcg_gen_or_i32(var, var, tmp);
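/* Worked example (illustrative, assuming the sign-extend/shift ordering
   above): for var = 0x0000cdab, tmp becomes 0xcd, the low byte 0xab is
   sign-extended and shifted to give 0xffffab00, and the final OR yields
   0xffffabcd - the low halfword byte-swapped and sign-extended, as REVSH
   requires. */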
285 /* Unsigned bitfield extract. */
286 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
289 tcg_gen_shri_i32(var, var, shift);
290 tcg_gen_andi_i32(var, var, mask);
293 /* Signed bitfield extract. */
294 static void gen_sbfx(TCGv var, int shift, int width)
299 tcg_gen_sari_i32(var, var, shift);
300 if (shift + width < 32) {
301 signbit = 1u << (width - 1);
302 tcg_gen_andi_i32(var, var, (1u << width) - 1);
303 tcg_gen_xori_i32(var, var, signbit);
304 tcg_gen_subi_i32(var, var, signbit);
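/* Worked example (illustrative): extracting a signed 4-bit field whose
   masked value is 0xa (binary 1010, i.e. -6) uses signbit = 0x8, and
   (0xa ^ 0x8) - 0x8 = 0x2 - 0x8 = -6, so the XOR/SUB pair above performs
   the sign extension without a variable-width shift. */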
308 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
309 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
311 tcg_gen_andi_i32(val, val, mask);
312 tcg_gen_shli_i32(val, val, shift);
313 tcg_gen_andi_i32(base, base, ~(mask << shift));
314 tcg_gen_or_i32(dest, base, val);
317 /* Round the top 32 bits of a 64-bit value. */
318 static void gen_roundqd(TCGv a, TCGv b)
320 tcg_gen_shri_i32(a, a, 31);
321 tcg_gen_add_i32(a, a, b);
324 /* FIXME: Most targets have native widening multiplication.
325 It would be good to use that instead of a full wide multiply. */
326 /* 32x32->64 multiply. Marks inputs as dead. */
327 static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
329 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
330 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
332 tcg_gen_extu_i32_i64(tmp1, a);
334 tcg_gen_extu_i32_i64(tmp2, b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
340 static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
342 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
343 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
345 tcg_gen_ext_i32_i64(tmp1, a);
347 tcg_gen_ext_i32_i64(tmp2, b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
353 /* Unsigned 32x32->64 multiply. */
354 static void gen_op_mull_T0_T1(void)
356 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
357 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
359 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
360 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
361 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
362 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
363 tcg_gen_shri_i64(tmp1, tmp1, 32);
364 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
367 /* Signed 32x32->64 multiply. */
368 static void gen_imull(TCGv a, TCGv b)
370 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
371 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
373 tcg_gen_ext_i32_i64(tmp1, a);
374 tcg_gen_ext_i32_i64(tmp2, b);
375 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
376 tcg_gen_trunc_i64_i32(a, tmp1);
377 tcg_gen_shri_i64(tmp1, tmp1, 32);
378 tcg_gen_trunc_i64_i32(b, tmp1);
380 #define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
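/* Illustrative note (not from the original source): gen_op_mull_T0_T1() and
   gen_imull() leave the 64-bit product split across their two 32-bit
   operands, low half in the first and high half in the second.  For the
   unsigned T0/T1 form:

       T0 = 0xffffffff, T1 = 2       product = 0x1fffffffe
       gen_op_mull_T0_T1();          T0 = 0xfffffffe, T1 = 0x00000001
 */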
382 /* Swap low and high halfwords. */
383 static void gen_swap_half(TCGv var)
385 TCGv tmp = new_tmp();
386 tcg_gen_shri_i32(tmp, var, 16);
387 tcg_gen_shli_i32(var, var, 16);
388 tcg_gen_or_i32(var, var, tmp);
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */
399 static void gen_add16(TCGv t0, TCGv t1)
401 TCGv tmp = new_tmp();
402 tcg_gen_xor_i32(tmp, t0, t1);
403 tcg_gen_andi_i32(tmp, tmp, 0x8000);
404 tcg_gen_andi_i32(t0, t0, ~0x8000);
405 tcg_gen_andi_i32(t1, t1, ~0x8000);
406 tcg_gen_add_i32(t0, t0, t1);
407 tcg_gen_xor_i32(t0, t0, tmp);
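/* Worked example (illustrative): t0 = t1 = 0x00018000.  The per-lane result
   should be 0x0002 in the high halfword and 0x0000 in the low one, with no
   carry propagating between lanes.  Here tmp = 0, both operands become
   0x00010000 after clearing bit 15, the add gives 0x00020000 and the final
   XOR leaves it unchanged - exactly the dual 16-bit sum. */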
412 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
414 /* Set CF to the top bit of var. */
415 static void gen_set_CF_bit31(TCGv var)
417 TCGv tmp = new_tmp();
418 tcg_gen_shri_i32(tmp, var, 31);
423 /* Set N and Z flags from var. */
424 static inline void gen_logic_CC(TCGv var)
426 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
427 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
431 static void gen_adc_T0_T1(void)
435 tmp = load_cpu_field(CF);
436 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
440 /* dest = T0 - T1 + CF - 1. */
441 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
444 tcg_gen_sub_i32(dest, t0, t1);
445 tmp = load_cpu_field(CF);
446 tcg_gen_add_i32(dest, dest, tmp);
447 tcg_gen_subi_i32(dest, dest, 1);
451 #define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
452 #define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
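/* Illustrative note: ARM uses CF as an inverted borrow, so with CF = 1
   gen_sbc_T0_T1() computes a plain T0 - T1, while with CF = 0 it computes
   T0 - T1 - 1; gen_rsc_T0_T1() is the same with the operands swapped. */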
454 /* T0 &= ~T1. Clobbers T1. */
455 /* FIXME: Implement bic natively. */
456 static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
458 TCGv tmp = new_tmp();
459 tcg_gen_not_i32(tmp, t1);
460 tcg_gen_and_i32(dest, t0, tmp);
463 static inline void gen_op_bicl_T0_T1(void)
469 /* FIXME: Implement this natively. */
470 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
472 /* FIXME: Implement this natively. */
473 static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
481 tcg_gen_shri_i32(tmp, t1, i);
482 tcg_gen_shli_i32(t1, t1, 32 - i);
483 tcg_gen_or_i32(t0, t1, tmp);
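/* Worked example (illustrative): tcg_gen_rori_i32(t0, t1, 1) with
   t1 = 0x80000001 produces (0x80000001 >> 1) | (0x80000001 << 31)
   = 0x40000000 | 0x80000000 = 0xc0000000. */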
487 static void shifter_out_im(TCGv var, int shift)
489 TCGv tmp = new_tmp();
491 tcg_gen_andi_i32(tmp, var, 1);
493 tcg_gen_shri_i32(tmp, var, shift);
495 tcg_gen_andi_i32(tmp, tmp, 1);
501 /* Shift by immediate. Includes special handling for shift == 0. */
502 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
508 shifter_out_im(var, 32 - shift);
509 tcg_gen_shli_i32(var, var, shift);
515 tcg_gen_shri_i32(var, var, 31);
518 tcg_gen_movi_i32(var, 0);
521 shifter_out_im(var, shift - 1);
522 tcg_gen_shri_i32(var, var, shift);
529 shifter_out_im(var, shift - 1);
532 tcg_gen_sari_i32(var, var, shift);
534 case 3: /* ROR/RRX */
537 shifter_out_im(var, shift - 1);
538 tcg_gen_rori_i32(var, var, shift); break;
540 TCGv tmp = load_cpu_field(CF);
542 shifter_out_im(var, 0);
543 tcg_gen_shri_i32(var, var, 1);
544 tcg_gen_shli_i32(tmp, tmp, 31);
545 tcg_gen_or_i32(var, var, tmp);
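/* Illustrative note: in the ARM immediate shift encoding a shift field of
   zero is special - LSR #0 and ASR #0 actually mean a shift by 32, and
   ROR #0 means RRX - which is what the shift == 0 branches above implement.
   For example (hypothetical call):

       gen_arm_shift_im(var, 1, 0, 1);    LSR #32: CF = old bit 31, var = 0
 */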
551 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
552 TCGv shift, int flags)
556 case 0: gen_helper_shl_cc(var, var, shift); break;
557 case 1: gen_helper_shr_cc(var, var, shift); break;
558 case 2: gen_helper_sar_cc(var, var, shift); break;
559 case 3: gen_helper_ror_cc(var, var, shift); break;
563 case 0: gen_helper_shl(var, var, shift); break;
564 case 1: gen_helper_shr(var, var, shift); break;
565 case 2: gen_helper_sar(var, var, shift); break;
566 case 3: gen_helper_ror(var, var, shift); break;
572 #define PAS_OP(pfx) \
574 case 0: gen_pas_helper(glue(pfx,add16)); break; \
575 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
576 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
577 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
578 case 4: gen_pas_helper(glue(pfx,add8)); break; \
579 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
581 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
586 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
588 tmp = tcg_temp_new(TCG_TYPE_PTR);
589 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
593 tmp = tcg_temp_new(TCG_TYPE_PTR);
594 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
597 #undef gen_pas_helper
598 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
611 #undef gen_pas_helper
/* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings.  */
617 #define PAS_OP(pfx) \
619 case 0: gen_pas_helper(glue(pfx,add8)); break; \
620 case 1: gen_pas_helper(glue(pfx,add16)); break; \
621 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
622 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
623 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
624 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
626 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
631 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
633 tmp = tcg_temp_new(TCG_TYPE_PTR);
634 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
638 tmp = tcg_temp_new(TCG_TYPE_PTR);
639 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
642 #undef gen_pas_helper
643 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
656 #undef gen_pas_helper
661 static void gen_test_cc(int cc, int label)
669 tmp = load_cpu_field(ZF);
670 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
673 tmp = load_cpu_field(ZF);
674 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
677 tmp = load_cpu_field(CF);
678 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
681 tmp = load_cpu_field(CF);
682 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
685 tmp = load_cpu_field(NF);
686 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
689 tmp = load_cpu_field(NF);
690 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
693 tmp = load_cpu_field(VF);
694 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
697 tmp = load_cpu_field(VF);
698 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
700 case 8: /* hi: C && !Z */
701 inv = gen_new_label();
702 tmp = load_cpu_field(CF);
703 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
705 tmp = load_cpu_field(ZF);
706 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
709 case 9: /* ls: !C || Z */
710 tmp = load_cpu_field(CF);
711 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
713 tmp = load_cpu_field(ZF);
714 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
716 case 10: /* ge: N == V -> N ^ V == 0 */
717 tmp = load_cpu_field(VF);
718 tmp2 = load_cpu_field(NF);
719 tcg_gen_xor_i32(tmp, tmp, tmp2);
721 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
723 case 11: /* lt: N != V -> N ^ V != 0 */
724 tmp = load_cpu_field(VF);
725 tmp2 = load_cpu_field(NF);
726 tcg_gen_xor_i32(tmp, tmp, tmp2);
728 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
730 case 12: /* gt: !Z && N == V */
731 inv = gen_new_label();
732 tmp = load_cpu_field(ZF);
733 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
735 tmp = load_cpu_field(VF);
736 tmp2 = load_cpu_field(NF);
737 tcg_gen_xor_i32(tmp, tmp, tmp2);
739 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
742 case 13: /* le: Z || N != V */
743 tmp = load_cpu_field(ZF);
744 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
746 tmp = load_cpu_field(VF);
747 tmp2 = load_cpu_field(NF);
748 tcg_gen_xor_i32(tmp, tmp, tmp2);
750 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
753 fprintf(stderr, "Bad condition code 0x%x\n", cc);
759 const uint8_t table_logic_cc[16] = {
778 /* Set PC and Thumb state from an immediate address. */
779 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
783 s->is_jmp = DISAS_UPDATE;
785 if (s->thumb != (addr & 1)) {
786 tcg_gen_movi_i32(tmp, addr & 1);
787 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
789 tcg_gen_movi_i32(tmp, addr & ~1);
790 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
794 /* Set PC and Thumb state from var. var is marked as dead. */
795 static inline void gen_bx(DisasContext *s, TCGv var)
799 s->is_jmp = DISAS_UPDATE;
801 tcg_gen_andi_i32(tmp, var, 1);
802 store_cpu_field(tmp, thumb);
803 tcg_gen_andi_i32(var, var, ~1);
804 store_cpu_field(var, regs[15]);
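/* Illustrative example: an interworking branch to var = 0x00008001 stores 1
   to env->thumb and 0x00008000 to PC, i.e. bit 0 of the target selects the
   Thumb instruction set and is cleared from the branch address. */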
807 /* TODO: This should be removed. Use gen_bx instead. */
808 static inline void gen_bx_T0(DisasContext *s)
810 TCGv tmp = new_tmp();
811 tcg_gen_mov_i32(tmp, cpu_T[0]);
815 #if defined(CONFIG_USER_ONLY)
816 #define gen_ldst(name, s) gen_op_##name##_raw()
818 #define gen_ldst(name, s) do { \
821 gen_op_##name##_user(); \
823 gen_op_##name##_kernel(); \
826 static inline TCGv gen_ld8s(TCGv addr, int index)
828 TCGv tmp = new_tmp();
829 tcg_gen_qemu_ld8s(tmp, addr, index);
832 static inline TCGv gen_ld8u(TCGv addr, int index)
834 TCGv tmp = new_tmp();
835 tcg_gen_qemu_ld8u(tmp, addr, index);
838 static inline TCGv gen_ld16s(TCGv addr, int index)
840 TCGv tmp = new_tmp();
841 tcg_gen_qemu_ld16s(tmp, addr, index);
844 static inline TCGv gen_ld16u(TCGv addr, int index)
846 TCGv tmp = new_tmp();
847 tcg_gen_qemu_ld16u(tmp, addr, index);
850 static inline TCGv gen_ld32(TCGv addr, int index)
852 TCGv tmp = new_tmp();
853 tcg_gen_qemu_ld32u(tmp, addr, index);
856 static inline void gen_st8(TCGv val, TCGv addr, int index)
858 tcg_gen_qemu_st8(val, addr, index);
861 static inline void gen_st16(TCGv val, TCGv addr, int index)
863 tcg_gen_qemu_st16(val, addr, index);
866 static inline void gen_st32(TCGv val, TCGv addr, int index)
868 tcg_gen_qemu_st32(val, addr, index);
872 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
874 load_reg_var(s, cpu_T[0], reg);
877 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
879 load_reg_var(s, cpu_T[1], reg);
882 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
884 load_reg_var(s, cpu_T[2], reg);
887 static inline void gen_set_pc_im(uint32_t val)
889 TCGv tmp = new_tmp();
890 tcg_gen_movi_i32(tmp, val);
891 store_cpu_field(tmp, regs[15]);
894 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
899 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
903 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
906 s->is_jmp = DISAS_JUMP;
910 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
912 gen_movl_reg_TN(s, reg, 0);
915 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
917 gen_movl_reg_TN(s, reg, 1);
920 /* Force a TB lookup after an instruction that changes the CPU state. */
921 static inline void gen_lookup_tb(DisasContext *s)
923 gen_op_movl_T0_im(s->pc);
924 gen_movl_reg_T0(s, 15);
925 s->is_jmp = DISAS_UPDATE;
928 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
931 int val, rm, shift, shiftop;
934 if (!(insn & (1 << 25))) {
937 if (!(insn & (1 << 23)))
940 tcg_gen_addi_i32(var, var, val);
944 shift = (insn >> 7) & 0x1f;
945 shiftop = (insn >> 5) & 3;
946 offset = load_reg(s, rm);
947 gen_arm_shift_im(offset, shiftop, shift, 0);
948 if (!(insn & (1 << 23)))
949 tcg_gen_sub_i32(var, var, offset);
951 tcg_gen_add_i32(var, var, offset);
956 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
962 if (insn & (1 << 22)) {
964 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
965 if (!(insn & (1 << 23)))
969 tcg_gen_addi_i32(var, var, val);
973 tcg_gen_addi_i32(var, var, extra);
975 offset = load_reg(s, rm);
976 if (!(insn & (1 << 23)))
977 tcg_gen_sub_i32(var, var, offset);
979 tcg_gen_add_i32(var, var, offset);
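/* Worked example (illustrative): for the immediate form (bit 22 set) the
   8-bit offset is split into imm4H (insn bits 11..8) and imm4L (bits 3..0),
   so bits 11..8 = 0x2 and bits 3..0 = 0x4 give val = 0x24; with the U bit
   (bit 23) set the offset is added to the base, otherwise subtracted. */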
984 #define VFP_OP2(name) \
985 static inline void gen_vfp_##name(int dp) \
988 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
990 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
993 #define VFP_OP1(name) \
994 static inline void gen_vfp_##name(int dp, int arg) \
997 gen_op_vfp_##name##d(arg); \
999 gen_op_vfp_##name##s(arg); \
1009 static inline void gen_vfp_abs(int dp)
1012 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1014 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1017 static inline void gen_vfp_neg(int dp)
1020 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1022 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1025 static inline void gen_vfp_sqrt(int dp)
1028 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1030 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1033 static inline void gen_vfp_cmp(int dp)
1036 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1038 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1041 static inline void gen_vfp_cmpe(int dp)
1044 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1046 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1049 static inline void gen_vfp_F1_ld0(int dp)
1052 tcg_gen_movi_i64(cpu_F1d, 0);
1054 tcg_gen_movi_i32(cpu_F1s, 0);
1057 static inline void gen_vfp_uito(int dp)
1060 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1062 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1065 static inline void gen_vfp_sito(int dp)
1068 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
1070 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
1073 static inline void gen_vfp_toui(int dp)
1076 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1078 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1081 static inline void gen_vfp_touiz(int dp)
1084 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1086 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1089 static inline void gen_vfp_tosi(int dp)
1092 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1094 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1097 static inline void gen_vfp_tosiz(int dp)
1100 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1102 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1105 #define VFP_GEN_FIX(name) \
1106 static inline void gen_vfp_##name(int dp, int shift) \
1109 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1111 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
1123 static inline void gen_vfp_ld(DisasContext *s, int dp)
1126 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
1128 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
1131 static inline void gen_vfp_st(DisasContext *s, int dp)
1134 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
1136 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
1140 vfp_reg_offset (int dp, int reg)
1143 return offsetof(CPUARMState, vfp.regs[reg]);
1145 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1146 + offsetof(CPU_DoubleU, l.upper);
1148 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1149 + offsetof(CPU_DoubleU, l.lower);
1153 /* Return the offset of a 32-bit piece of a NEON register.
1154 zero is the least significant end of the register. */
1156 neon_reg_offset (int reg, int n)
1160 return vfp_reg_offset(0, sreg);
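/* Illustrative example: single-precision registers overlay the
   double-precision bank, so vfp_reg_offset(0, 3) - i.e. S3 - resolves to the
   upper 32-bit half of vfp.regs[1] (D1), with CPU_DoubleU hiding the host
   endianness.  neon_reg_offset(reg, n) uses the same mapping to pick the
   n-th 32-bit piece of a NEON register. */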
1163 /* FIXME: Remove these. */
1164 #define neon_T0 cpu_T[0]
1165 #define neon_T1 cpu_T[1]
1166 #define NEON_GET_REG(T, reg, n) \
1167 tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1168 #define NEON_SET_REG(T, reg, n) \
1169 tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1171 static TCGv neon_load_reg(int reg, int pass)
1173 TCGv tmp = new_tmp();
1174 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1178 static void neon_store_reg(int reg, int pass, TCGv var)
1180 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1184 static inline void neon_load_reg64(TCGv var, int reg)
1186 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1189 static inline void neon_store_reg64(TCGv var, int reg)
1191 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1194 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1195 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1196 #define tcg_gen_st_f32 tcg_gen_st_i32
1197 #define tcg_gen_st_f64 tcg_gen_st_i64
1199 static inline void gen_mov_F0_vreg(int dp, int reg)
1202 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1204 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1207 static inline void gen_mov_F1_vreg(int dp, int reg)
1210 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1212 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1215 static inline void gen_mov_vreg_F0(int dp, int reg)
1218 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1220 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1223 #define ARM_CP_RW_BIT (1 << 20)
1225 static inline void iwmmxt_load_reg(TCGv var, int reg)
1227 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1230 static inline void iwmmxt_store_reg(TCGv var, int reg)
1232 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1235 static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1237 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1240 static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1242 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1245 static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1247 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1250 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1252 iwmmxt_store_reg(cpu_M0, rn);
1255 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1257 iwmmxt_load_reg(cpu_M0, rn);
1260 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1262 iwmmxt_load_reg(cpu_V1, rn);
1263 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1266 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1268 iwmmxt_load_reg(cpu_V1, rn);
1269 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1272 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1274 iwmmxt_load_reg(cpu_V1, rn);
1275 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1278 #define IWMMXT_OP(name) \
1279 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1281 iwmmxt_load_reg(cpu_V1, rn); \
1282 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1285 #define IWMMXT_OP_ENV(name) \
1286 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1288 iwmmxt_load_reg(cpu_V1, rn); \
1289 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1292 #define IWMMXT_OP_ENV_SIZE(name) \
1293 IWMMXT_OP_ENV(name##b) \
1294 IWMMXT_OP_ENV(name##w) \
1295 IWMMXT_OP_ENV(name##l)
1297 #define IWMMXT_OP_ENV1(name) \
1298 static inline void gen_op_iwmmxt_##name##_M0(void) \
1300 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1314 IWMMXT_OP_ENV_SIZE(unpackl)
1315 IWMMXT_OP_ENV_SIZE(unpackh)
1317 IWMMXT_OP_ENV1(unpacklub)
1318 IWMMXT_OP_ENV1(unpackluw)
1319 IWMMXT_OP_ENV1(unpacklul)
1320 IWMMXT_OP_ENV1(unpackhub)
1321 IWMMXT_OP_ENV1(unpackhuw)
1322 IWMMXT_OP_ENV1(unpackhul)
1323 IWMMXT_OP_ENV1(unpacklsb)
1324 IWMMXT_OP_ENV1(unpacklsw)
1325 IWMMXT_OP_ENV1(unpacklsl)
1326 IWMMXT_OP_ENV1(unpackhsb)
1327 IWMMXT_OP_ENV1(unpackhsw)
1328 IWMMXT_OP_ENV1(unpackhsl)
1330 IWMMXT_OP_ENV_SIZE(cmpeq)
1331 IWMMXT_OP_ENV_SIZE(cmpgtu)
1332 IWMMXT_OP_ENV_SIZE(cmpgts)
1334 IWMMXT_OP_ENV_SIZE(mins)
1335 IWMMXT_OP_ENV_SIZE(minu)
1336 IWMMXT_OP_ENV_SIZE(maxs)
1337 IWMMXT_OP_ENV_SIZE(maxu)
1339 IWMMXT_OP_ENV_SIZE(subn)
1340 IWMMXT_OP_ENV_SIZE(addn)
1341 IWMMXT_OP_ENV_SIZE(subu)
1342 IWMMXT_OP_ENV_SIZE(addu)
1343 IWMMXT_OP_ENV_SIZE(subs)
1344 IWMMXT_OP_ENV_SIZE(adds)
1346 IWMMXT_OP_ENV(avgb0)
1347 IWMMXT_OP_ENV(avgb1)
1348 IWMMXT_OP_ENV(avgw0)
1349 IWMMXT_OP_ENV(avgw1)
1353 IWMMXT_OP_ENV(packuw)
1354 IWMMXT_OP_ENV(packul)
1355 IWMMXT_OP_ENV(packuq)
1356 IWMMXT_OP_ENV(packsw)
1357 IWMMXT_OP_ENV(packsl)
1358 IWMMXT_OP_ENV(packsq)
1360 static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1362 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1365 static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1367 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1370 static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1372 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1375 static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1377 iwmmxt_load_reg(cpu_V1, rn);
1378 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1381 static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1383 TCGv tmp = tcg_const_i32(shift);
1384 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1387 static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1389 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1390 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1391 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1394 static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1396 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1397 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1398 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1401 static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1403 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1404 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1406 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
1409 static void gen_op_iwmmxt_set_mup(void)
1412 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1413 tcg_gen_ori_i32(tmp, tmp, 2);
1414 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1417 static void gen_op_iwmmxt_set_cup(void)
1420 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1421 tcg_gen_ori_i32(tmp, tmp, 1);
1422 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1425 static void gen_op_iwmmxt_setpsr_nz(void)
1427 TCGv tmp = new_tmp();
1428 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1429 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1432 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1434 iwmmxt_load_reg(cpu_V1, rn);
1435 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1436 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1440 static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1442 iwmmxt_load_reg(cpu_V0, rn);
1443 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1444 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1445 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1448 static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1450 tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
1451 iwmmxt_store_reg(cpu_V0, rn);
1454 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1459 rd = (insn >> 16) & 0xf;
1460 gen_movl_T1_reg(s, rd);
1462 offset = (insn & 0xff) << ((insn >> 7) & 2);
1463 if (insn & (1 << 24)) {
1465 if (insn & (1 << 23))
1466 gen_op_addl_T1_im(offset);
1468 gen_op_addl_T1_im(-offset);
1470 if (insn & (1 << 21))
1471 gen_movl_reg_T1(s, rd);
1472 } else if (insn & (1 << 21)) {
1474 if (insn & (1 << 23))
1475 gen_op_movl_T0_im(offset);
1477 gen_op_movl_T0_im(- offset);
1478 gen_op_addl_T0_T1();
1479 gen_movl_reg_T0(s, rd);
1480 } else if (!(insn & (1 << 23)))
1485 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1487 int rd = (insn >> 0) & 0xf;
1489 if (insn & (1 << 8))
1490 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1493 gen_op_iwmmxt_movl_T0_wCx(rd);
1495 gen_iwmmxt_movl_T0_T1_wRn(rd);
1497 gen_op_movl_T1_im(mask);
1498 gen_op_andl_T0_T1();
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1504 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1507 int rdhi, rdlo, rd0, rd1, i;
1510 if ((insn & 0x0e000e00) == 0x0c000000) {
1511 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1513 rdlo = (insn >> 12) & 0xf;
1514 rdhi = (insn >> 16) & 0xf;
1515 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1516 gen_iwmmxt_movl_T0_T1_wRn(wrd);
1517 gen_movl_reg_T0(s, rdlo);
1518 gen_movl_reg_T1(s, rdhi);
1519 } else { /* TMCRR */
1520 gen_movl_T0_reg(s, rdlo);
1521 gen_movl_T1_reg(s, rdhi);
1522 gen_iwmmxt_movl_wRn_T0_T1(wrd);
1523 gen_op_iwmmxt_set_mup();
1528 wrd = (insn >> 12) & 0xf;
1529 if (gen_iwmmxt_address(s, insn))
1531 if (insn & ARM_CP_RW_BIT) {
1532 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1533 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1534 tcg_gen_mov_i32(cpu_T[0], tmp);
1536 gen_op_iwmmxt_movl_wCx_T0(wrd);
1539 if (insn & (1 << 8)) {
1540 if (insn & (1 << 22)) { /* WLDRD */
1541 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1543 } else { /* WLDRW wRd */
1544 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1547 if (insn & (1 << 22)) { /* WLDRH */
1548 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1549 } else { /* WLDRB */
1550 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1554 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1557 gen_op_iwmmxt_movq_wRn_M0(wrd);
1560 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1561 gen_op_iwmmxt_movl_T0_wCx(wrd);
1563 tcg_gen_mov_i32(tmp, cpu_T[0]);
1564 gen_st32(tmp, cpu_T[1], IS_USER(s));
1566 gen_op_iwmmxt_movq_M0_wRn(wrd);
1568 if (insn & (1 << 8)) {
1569 if (insn & (1 << 22)) { /* WSTRD */
1571 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1572 } else { /* WSTRW wRd */
1573 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1574 gen_st32(tmp, cpu_T[1], IS_USER(s));
1577 if (insn & (1 << 22)) { /* WSTRH */
1578 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1579 gen_st16(tmp, cpu_T[1], IS_USER(s));
1580 } else { /* WSTRB */
1581 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1582 gen_st8(tmp, cpu_T[1], IS_USER(s));
1590 if ((insn & 0x0f000000) != 0x0e000000)
1593 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1594 case 0x000: /* WOR */
1595 wrd = (insn >> 12) & 0xf;
1596 rd0 = (insn >> 0) & 0xf;
1597 rd1 = (insn >> 16) & 0xf;
1598 gen_op_iwmmxt_movq_M0_wRn(rd0);
1599 gen_op_iwmmxt_orq_M0_wRn(rd1);
1600 gen_op_iwmmxt_setpsr_nz();
1601 gen_op_iwmmxt_movq_wRn_M0(wrd);
1602 gen_op_iwmmxt_set_mup();
1603 gen_op_iwmmxt_set_cup();
1605 case 0x011: /* TMCR */
1608 rd = (insn >> 12) & 0xf;
1609 wrd = (insn >> 16) & 0xf;
1611 case ARM_IWMMXT_wCID:
1612 case ARM_IWMMXT_wCASF:
1614 case ARM_IWMMXT_wCon:
1615 gen_op_iwmmxt_set_cup();
1617 case ARM_IWMMXT_wCSSF:
1618 gen_op_iwmmxt_movl_T0_wCx(wrd);
1619 gen_movl_T1_reg(s, rd);
1620 gen_op_bicl_T0_T1();
1621 gen_op_iwmmxt_movl_wCx_T0(wrd);
1623 case ARM_IWMMXT_wCGR0:
1624 case ARM_IWMMXT_wCGR1:
1625 case ARM_IWMMXT_wCGR2:
1626 case ARM_IWMMXT_wCGR3:
1627 gen_op_iwmmxt_set_cup();
1628 gen_movl_reg_T0(s, rd);
1629 gen_op_iwmmxt_movl_wCx_T0(wrd);
1635 case 0x100: /* WXOR */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 0) & 0xf;
1638 rd1 = (insn >> 16) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
1640 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1641 gen_op_iwmmxt_setpsr_nz();
1642 gen_op_iwmmxt_movq_wRn_M0(wrd);
1643 gen_op_iwmmxt_set_mup();
1644 gen_op_iwmmxt_set_cup();
1646 case 0x111: /* TMRC */
1649 rd = (insn >> 12) & 0xf;
1650 wrd = (insn >> 16) & 0xf;
1651 gen_op_iwmmxt_movl_T0_wCx(wrd);
1652 gen_movl_reg_T0(s, rd);
1654 case 0x300: /* WANDN */
1655 wrd = (insn >> 12) & 0xf;
1656 rd0 = (insn >> 0) & 0xf;
1657 rd1 = (insn >> 16) & 0xf;
1658 gen_op_iwmmxt_movq_M0_wRn(rd0);
1659 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1660 gen_op_iwmmxt_andq_M0_wRn(rd1);
1661 gen_op_iwmmxt_setpsr_nz();
1662 gen_op_iwmmxt_movq_wRn_M0(wrd);
1663 gen_op_iwmmxt_set_mup();
1664 gen_op_iwmmxt_set_cup();
1666 case 0x200: /* WAND */
1667 wrd = (insn >> 12) & 0xf;
1668 rd0 = (insn >> 0) & 0xf;
1669 rd1 = (insn >> 16) & 0xf;
1670 gen_op_iwmmxt_movq_M0_wRn(rd0);
1671 gen_op_iwmmxt_andq_M0_wRn(rd1);
1672 gen_op_iwmmxt_setpsr_nz();
1673 gen_op_iwmmxt_movq_wRn_M0(wrd);
1674 gen_op_iwmmxt_set_mup();
1675 gen_op_iwmmxt_set_cup();
1677 case 0x810: case 0xa10: /* WMADD */
1678 wrd = (insn >> 12) & 0xf;
1679 rd0 = (insn >> 0) & 0xf;
1680 rd1 = (insn >> 16) & 0xf;
1681 gen_op_iwmmxt_movq_M0_wRn(rd0);
1682 if (insn & (1 << 21))
1683 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1685 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1686 gen_op_iwmmxt_movq_wRn_M0(wrd);
1687 gen_op_iwmmxt_set_mup();
1689 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1690 wrd = (insn >> 12) & 0xf;
1691 rd0 = (insn >> 16) & 0xf;
1692 rd1 = (insn >> 0) & 0xf;
1693 gen_op_iwmmxt_movq_M0_wRn(rd0);
1694 switch ((insn >> 22) & 3) {
1696 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1699 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1702 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1707 gen_op_iwmmxt_movq_wRn_M0(wrd);
1708 gen_op_iwmmxt_set_mup();
1709 gen_op_iwmmxt_set_cup();
1711 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1712 wrd = (insn >> 12) & 0xf;
1713 rd0 = (insn >> 16) & 0xf;
1714 rd1 = (insn >> 0) & 0xf;
1715 gen_op_iwmmxt_movq_M0_wRn(rd0);
1716 switch ((insn >> 22) & 3) {
1718 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1721 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1724 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1729 gen_op_iwmmxt_movq_wRn_M0(wrd);
1730 gen_op_iwmmxt_set_mup();
1731 gen_op_iwmmxt_set_cup();
1733 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1734 wrd = (insn >> 12) & 0xf;
1735 rd0 = (insn >> 16) & 0xf;
1736 rd1 = (insn >> 0) & 0xf;
1737 gen_op_iwmmxt_movq_M0_wRn(rd0);
1738 if (insn & (1 << 22))
1739 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1741 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1742 if (!(insn & (1 << 20)))
1743 gen_op_iwmmxt_addl_M0_wRn(wrd);
1744 gen_op_iwmmxt_movq_wRn_M0(wrd);
1745 gen_op_iwmmxt_set_mup();
1747 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1748 wrd = (insn >> 12) & 0xf;
1749 rd0 = (insn >> 16) & 0xf;
1750 rd1 = (insn >> 0) & 0xf;
1751 gen_op_iwmmxt_movq_M0_wRn(rd0);
1752 if (insn & (1 << 21)) {
1753 if (insn & (1 << 20))
1754 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1756 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1758 if (insn & (1 << 20))
1759 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1761 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1763 gen_op_iwmmxt_movq_wRn_M0(wrd);
1764 gen_op_iwmmxt_set_mup();
1766 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1767 wrd = (insn >> 12) & 0xf;
1768 rd0 = (insn >> 16) & 0xf;
1769 rd1 = (insn >> 0) & 0xf;
1770 gen_op_iwmmxt_movq_M0_wRn(rd0);
1771 if (insn & (1 << 21))
1772 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1774 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1775 if (!(insn & (1 << 20))) {
1776 iwmmxt_load_reg(cpu_V1, wrd);
1777 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1779 gen_op_iwmmxt_movq_wRn_M0(wrd);
1780 gen_op_iwmmxt_set_mup();
1782 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1783 wrd = (insn >> 12) & 0xf;
1784 rd0 = (insn >> 16) & 0xf;
1785 rd1 = (insn >> 0) & 0xf;
1786 gen_op_iwmmxt_movq_M0_wRn(rd0);
1787 switch ((insn >> 22) & 3) {
1789 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1792 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1795 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1800 gen_op_iwmmxt_movq_wRn_M0(wrd);
1801 gen_op_iwmmxt_set_mup();
1802 gen_op_iwmmxt_set_cup();
1804 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1805 wrd = (insn >> 12) & 0xf;
1806 rd0 = (insn >> 16) & 0xf;
1807 rd1 = (insn >> 0) & 0xf;
1808 gen_op_iwmmxt_movq_M0_wRn(rd0);
1809 if (insn & (1 << 22)) {
1810 if (insn & (1 << 20))
1811 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1813 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1815 if (insn & (1 << 20))
1816 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1818 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1820 gen_op_iwmmxt_movq_wRn_M0(wrd);
1821 gen_op_iwmmxt_set_mup();
1822 gen_op_iwmmxt_set_cup();
1824 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1825 wrd = (insn >> 12) & 0xf;
1826 rd0 = (insn >> 16) & 0xf;
1827 rd1 = (insn >> 0) & 0xf;
1828 gen_op_iwmmxt_movq_M0_wRn(rd0);
1829 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1830 gen_op_movl_T1_im(7);
1831 gen_op_andl_T0_T1();
1832 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1833 gen_op_iwmmxt_movq_wRn_M0(wrd);
1834 gen_op_iwmmxt_set_mup();
1836 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1837 rd = (insn >> 12) & 0xf;
1838 wrd = (insn >> 16) & 0xf;
1839 gen_movl_T0_reg(s, rd);
1840 gen_op_iwmmxt_movq_M0_wRn(wrd);
1841 switch ((insn >> 6) & 3) {
1843 gen_op_movl_T1_im(0xff);
1844 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1847 gen_op_movl_T1_im(0xffff);
1848 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1851 gen_op_movl_T1_im(0xffffffff);
1852 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1857 gen_op_iwmmxt_movq_wRn_M0(wrd);
1858 gen_op_iwmmxt_set_mup();
1860 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1861 rd = (insn >> 12) & 0xf;
1862 wrd = (insn >> 16) & 0xf;
1865 gen_op_iwmmxt_movq_M0_wRn(wrd);
1866 switch ((insn >> 22) & 3) {
1869 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1871 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
1876 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1878 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
1882 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
1887 gen_movl_reg_T0(s, rd);
1889 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1890 if ((insn & 0x000ff008) != 0x0003f000)
1892 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1893 switch ((insn >> 22) & 3) {
1895 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1898 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1901 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1906 gen_op_shll_T1_im(28);
1907 gen_set_nzcv(cpu_T[1]);
1909 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1910 rd = (insn >> 12) & 0xf;
1911 wrd = (insn >> 16) & 0xf;
1912 gen_movl_T0_reg(s, rd);
1913 switch ((insn >> 6) & 3) {
1915 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
1918 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
1921 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
1926 gen_op_iwmmxt_movq_wRn_M0(wrd);
1927 gen_op_iwmmxt_set_mup();
1929 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1930 if ((insn & 0x000ff00f) != 0x0003f000)
1932 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1933 switch ((insn >> 22) & 3) {
1935 for (i = 0; i < 7; i ++) {
1936 gen_op_shll_T1_im(4);
1937 gen_op_andl_T0_T1();
1941 for (i = 0; i < 3; i ++) {
1942 gen_op_shll_T1_im(8);
1943 gen_op_andl_T0_T1();
1947 gen_op_shll_T1_im(16);
1948 gen_op_andl_T0_T1();
1953 gen_set_nzcv(cpu_T[0]);
1955 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1956 wrd = (insn >> 12) & 0xf;
1957 rd0 = (insn >> 16) & 0xf;
1958 gen_op_iwmmxt_movq_M0_wRn(rd0);
1959 switch ((insn >> 22) & 3) {
1961 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1964 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1967 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1972 gen_op_iwmmxt_movq_wRn_M0(wrd);
1973 gen_op_iwmmxt_set_mup();
1975 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1976 if ((insn & 0x000ff00f) != 0x0003f000)
1978 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1979 switch ((insn >> 22) & 3) {
1981 for (i = 0; i < 7; i ++) {
1982 gen_op_shll_T1_im(4);
1987 for (i = 0; i < 3; i ++) {
1988 gen_op_shll_T1_im(8);
1993 gen_op_shll_T1_im(16);
1999 gen_set_nzcv(cpu_T[0]);
2001 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2002 rd = (insn >> 12) & 0xf;
2003 rd0 = (insn >> 16) & 0xf;
2004 if ((insn & 0xf) != 0)
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
2007 switch ((insn >> 22) & 3) {
2009 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
2012 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
2015 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
2020 gen_movl_reg_T0(s, rd);
2022 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2023 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2024 wrd = (insn >> 12) & 0xf;
2025 rd0 = (insn >> 16) & 0xf;
2026 rd1 = (insn >> 0) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0);
2028 switch ((insn >> 22) & 3) {
2030 if (insn & (1 << 21))
2031 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2033 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2036 if (insn & (1 << 21))
2037 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2039 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2042 if (insn & (1 << 21))
2043 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2045 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2050 gen_op_iwmmxt_movq_wRn_M0(wrd);
2051 gen_op_iwmmxt_set_mup();
2052 gen_op_iwmmxt_set_cup();
2054 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2055 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2056 wrd = (insn >> 12) & 0xf;
2057 rd0 = (insn >> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
2059 switch ((insn >> 22) & 3) {
2061 if (insn & (1 << 21))
2062 gen_op_iwmmxt_unpacklsb_M0();
2064 gen_op_iwmmxt_unpacklub_M0();
2067 if (insn & (1 << 21))
2068 gen_op_iwmmxt_unpacklsw_M0();
2070 gen_op_iwmmxt_unpackluw_M0();
2073 if (insn & (1 << 21))
2074 gen_op_iwmmxt_unpacklsl_M0();
2076 gen_op_iwmmxt_unpacklul_M0();
2081 gen_op_iwmmxt_movq_wRn_M0(wrd);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2085 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2086 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
2090 switch ((insn >> 22) & 3) {
2092 if (insn & (1 << 21))
2093 gen_op_iwmmxt_unpackhsb_M0();
2095 gen_op_iwmmxt_unpackhub_M0();
2098 if (insn & (1 << 21))
2099 gen_op_iwmmxt_unpackhsw_M0();
2101 gen_op_iwmmxt_unpackhuw_M0();
2104 if (insn & (1 << 21))
2105 gen_op_iwmmxt_unpackhsl_M0();
2107 gen_op_iwmmxt_unpackhul_M0();
2112 gen_op_iwmmxt_movq_wRn_M0(wrd);
2113 gen_op_iwmmxt_set_mup();
2114 gen_op_iwmmxt_set_cup();
2116 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2117 case 0x214: case 0x614: case 0xa14: case 0xe14:
2118 wrd = (insn >> 12) & 0xf;
2119 rd0 = (insn >> 16) & 0xf;
2120 gen_op_iwmmxt_movq_M0_wRn(rd0);
2121 if (gen_iwmmxt_shift(insn, 0xff))
2123 switch ((insn >> 22) & 3) {
2127 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2130 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2133 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2136 gen_op_iwmmxt_movq_wRn_M0(wrd);
2137 gen_op_iwmmxt_set_mup();
2138 gen_op_iwmmxt_set_cup();
2140 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2141 case 0x014: case 0x414: case 0x814: case 0xc14:
2142 wrd = (insn >> 12) & 0xf;
2143 rd0 = (insn >> 16) & 0xf;
2144 gen_op_iwmmxt_movq_M0_wRn(rd0);
2145 if (gen_iwmmxt_shift(insn, 0xff))
2147 switch ((insn >> 22) & 3) {
2151 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2154 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2157 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2160 gen_op_iwmmxt_movq_wRn_M0(wrd);
2161 gen_op_iwmmxt_set_mup();
2162 gen_op_iwmmxt_set_cup();
2164 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2165 case 0x114: case 0x514: case 0x914: case 0xd14:
2166 wrd = (insn >> 12) & 0xf;
2167 rd0 = (insn >> 16) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 if (gen_iwmmxt_shift(insn, 0xff))
2171 switch ((insn >> 22) & 3) {
2175 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2178 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2181 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2184 gen_op_iwmmxt_movq_wRn_M0(wrd);
2185 gen_op_iwmmxt_set_mup();
2186 gen_op_iwmmxt_set_cup();
2188 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2189 case 0x314: case 0x714: case 0xb14: case 0xf14:
2190 wrd = (insn >> 12) & 0xf;
2191 rd0 = (insn >> 16) & 0xf;
2192 gen_op_iwmmxt_movq_M0_wRn(rd0);
2193 switch ((insn >> 22) & 3) {
2197 if (gen_iwmmxt_shift(insn, 0xf))
2199 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2202 if (gen_iwmmxt_shift(insn, 0x1f))
2204 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2207 if (gen_iwmmxt_shift(insn, 0x3f))
2209 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2212 gen_op_iwmmxt_movq_wRn_M0(wrd);
2213 gen_op_iwmmxt_set_mup();
2214 gen_op_iwmmxt_set_cup();
2216 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2217 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2218 wrd = (insn >> 12) & 0xf;
2219 rd0 = (insn >> 16) & 0xf;
2220 rd1 = (insn >> 0) & 0xf;
2221 gen_op_iwmmxt_movq_M0_wRn(rd0);
2222 switch ((insn >> 22) & 3) {
2224 if (insn & (1 << 21))
2225 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2227 gen_op_iwmmxt_minub_M0_wRn(rd1);
2230 if (insn & (1 << 21))
2231 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2233 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2236 if (insn & (1 << 21))
2237 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2239 gen_op_iwmmxt_minul_M0_wRn(rd1);
2244 gen_op_iwmmxt_movq_wRn_M0(wrd);
2245 gen_op_iwmmxt_set_mup();
2247 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2248 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2249 wrd = (insn >> 12) & 0xf;
2250 rd0 = (insn >> 16) & 0xf;
2251 rd1 = (insn >> 0) & 0xf;
2252 gen_op_iwmmxt_movq_M0_wRn(rd0);
2253 switch ((insn >> 22) & 3) {
2255 if (insn & (1 << 21))
2256 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2258 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2261 if (insn & (1 << 21))
2262 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2264 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2267 if (insn & (1 << 21))
2268 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2270 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2275 gen_op_iwmmxt_movq_wRn_M0(wrd);
2276 gen_op_iwmmxt_set_mup();
2278 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2279 case 0x402: case 0x502: case 0x602: case 0x702:
2280 wrd = (insn >> 12) & 0xf;
2281 rd0 = (insn >> 16) & 0xf;
2282 rd1 = (insn >> 0) & 0xf;
2283 gen_op_iwmmxt_movq_M0_wRn(rd0);
2284 gen_op_movl_T0_im((insn >> 20) & 3);
2285 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2286 gen_op_iwmmxt_movq_wRn_M0(wrd);
2287 gen_op_iwmmxt_set_mup();
2289 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2290 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2291 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2292 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2293 wrd = (insn >> 12) & 0xf;
2294 rd0 = (insn >> 16) & 0xf;
2295 rd1 = (insn >> 0) & 0xf;
2296 gen_op_iwmmxt_movq_M0_wRn(rd0);
2297 switch ((insn >> 20) & 0xf) {
2299 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2302 gen_op_iwmmxt_subub_M0_wRn(rd1);
2305 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2308 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2311 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2314 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2317 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2320 gen_op_iwmmxt_subul_M0_wRn(rd1);
2323 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 gen_op_iwmmxt_set_cup();
2332 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2333 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2334 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2335 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2336 wrd = (insn >> 12) & 0xf;
2337 rd0 = (insn >> 16) & 0xf;
2338 gen_op_iwmmxt_movq_M0_wRn(rd0);
2339 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
2340 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2341 gen_op_iwmmxt_movq_wRn_M0(wrd);
2342 gen_op_iwmmxt_set_mup();
2343 gen_op_iwmmxt_set_cup();
2345 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2346 case 0x418: case 0x518: case 0x618: case 0x718:
2347 case 0x818: case 0x918: case 0xa18: case 0xb18:
2348 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2349 wrd = (insn >> 12) & 0xf;
2350 rd0 = (insn >> 16) & 0xf;
2351 rd1 = (insn >> 0) & 0xf;
2352 gen_op_iwmmxt_movq_M0_wRn(rd0);
2353 switch ((insn >> 20) & 0xf) {
2355 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2358 gen_op_iwmmxt_addub_M0_wRn(rd1);
2361 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2364 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2367 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2370 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2373 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2376 gen_op_iwmmxt_addul_M0_wRn(rd1);
2379 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2384 gen_op_iwmmxt_movq_wRn_M0(wrd);
2385 gen_op_iwmmxt_set_mup();
2386 gen_op_iwmmxt_set_cup();
2388 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2389 case 0x408: case 0x508: case 0x608: case 0x708:
2390 case 0x808: case 0x908: case 0xa08: case 0xb08:
2391 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2392 wrd = (insn >> 12) & 0xf;
2393 rd0 = (insn >> 16) & 0xf;
2394 rd1 = (insn >> 0) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
2396 if (!(insn & (1 << 20)))
2398 switch ((insn >> 22) & 3) {
2402 if (insn & (1 << 21))
2403 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2405 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2408 if (insn & (1 << 21))
2409 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2411 gen_op_iwmmxt_packul_M0_wRn(rd1);
2414 if (insn & (1 << 21))
2415 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2417 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2420 gen_op_iwmmxt_movq_wRn_M0(wrd);
2421 gen_op_iwmmxt_set_mup();
2422 gen_op_iwmmxt_set_cup();
2424 case 0x201: case 0x203: case 0x205: case 0x207:
2425 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2426 case 0x211: case 0x213: case 0x215: case 0x217:
2427 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2428 wrd = (insn >> 5) & 0xf;
2429 rd0 = (insn >> 12) & 0xf;
2430 rd1 = (insn >> 0) & 0xf;
2431 if (rd0 == 0xf || rd1 == 0xf)
2433 gen_op_iwmmxt_movq_M0_wRn(wrd);
2434 switch ((insn >> 16) & 0xf) {
2435 case 0x0: /* TMIA */
2436 gen_movl_T0_reg(s, rd0);
2437 gen_movl_T1_reg(s, rd1);
2438 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2440 case 0x8: /* TMIAPH */
2441 gen_movl_T0_reg(s, rd0);
2442 gen_movl_T1_reg(s, rd1);
2443 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2445 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2446 gen_movl_T1_reg(s, rd0);
2447 if (insn & (1 << 16))
2448 gen_op_shrl_T1_im(16);
2449 gen_op_movl_T0_T1();
2450 gen_movl_T1_reg(s, rd1);
2451 if (insn & (1 << 17))
2452 gen_op_shrl_T1_im(16);
2453 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2458 gen_op_iwmmxt_movq_wRn_M0(wrd);
2459 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
2470 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2472 int acc, rd0, rd1, rdhi, rdlo;
2474 if ((insn & 0x0ff00f10) == 0x0e200010) {
2475 /* Multiply with Internal Accumulate Format */
2476 rd0 = (insn >> 12) & 0xf;
2478 acc = (insn >> 5) & 7;
2483 switch ((insn >> 16) & 0xf) {
2485 gen_movl_T0_reg(s, rd0);
2486 gen_movl_T1_reg(s, rd1);
2487 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2489 case 0x8: /* MIAPH */
2490 gen_movl_T0_reg(s, rd0);
2491 gen_movl_T1_reg(s, rd1);
2492 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2494 case 0xc: /* MIABB */
2495 case 0xd: /* MIABT */
2496 case 0xe: /* MIATB */
2497 case 0xf: /* MIATT */
2498 gen_movl_T1_reg(s, rd0);
2499 if (insn & (1 << 16))
2500 gen_op_shrl_T1_im(16);
2501 gen_op_movl_T0_T1();
2502 gen_movl_T1_reg(s, rd1);
2503 if (insn & (1 << 17))
2504 gen_op_shrl_T1_im(16);
2505 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2511 gen_op_iwmmxt_movq_wRn_M0(acc);
2515 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2516 /* Internal Accumulator Access Format */
2517 rdhi = (insn >> 16) & 0xf;
2518 rdlo = (insn >> 12) & 0xf;
2524 if (insn & ARM_CP_RW_BIT) { /* MRA */
2525 gen_iwmmxt_movl_T0_T1_wRn(acc);
2526 gen_movl_reg_T0(s, rdlo);
2527 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2528 gen_op_andl_T0_T1();
2529 gen_movl_reg_T0(s, rdhi);
2531 gen_movl_T0_reg(s, rdlo);
2532 gen_movl_T1_reg(s, rdhi);
2533 gen_iwmmxt_movl_wRn_T0_T1(acc);
/* Disassemble a system coprocessor instruction.  Return nonzero if the
   instruction is not defined.  */
2543 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2546 uint32_t rd = (insn >> 12) & 0xf;
2547 uint32_t cp = (insn >> 8) & 0xf;
2552 if (insn & ARM_CP_RW_BIT) {
2553 if (!env->cp[cp].cp_read)
2555 gen_set_pc_im(s->pc);
2557 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2558 store_reg(s, rd, tmp);
2560 if (!env->cp[cp].cp_write)
2562 gen_set_pc_im(s->pc);
2563 tmp = load_reg(s, rd);
2564 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
2570 static int cp15_user_ok(uint32_t insn)
2572 int cpn = (insn >> 16) & 0xf;
2573 int cpm = insn & 0xf;
2574 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2576 if (cpn == 13 && cpm == 0) {
2578 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2582 /* ISB, DSB, DMB. */
2583 if ((cpm == 5 && op == 4)
2584 || (cpm == 10 && (op == 4 || op == 5)))
2590 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2591 instruction is not defined. */
2592 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2597 /* M profile cores use memory mapped registers instead of cp15. */
2598 if (arm_feature(env, ARM_FEATURE_M))
2601 if ((insn & (1 << 25)) == 0) {
2602 if (insn & (1 << 20)) {
2606 /* mcrr. Used for block cache operations, so implement as no-op. */
2609 if ((insn & (1 << 4)) == 0) {
2613 if (IS_USER(s) && !cp15_user_ok(insn)) {
2616 if ((insn & 0x0fff0fff) == 0x0e070f90
2617 || (insn & 0x0fff0fff) == 0x0e070f58) {
2618 /* Wait for interrupt. */
2619 gen_set_pc_im(s->pc);
2620 s->is_jmp = DISAS_WFI;
2623 rd = (insn >> 12) & 0xf;
2624 if (insn & ARM_CP_RW_BIT) {
2626 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
2627 /* If the destination register is r15 then the condition codes are set. */
2629 store_reg(s, rd, tmp);
2633 tmp = load_reg(s, rd);
2634 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2636 /* Normally we would always end the TB here, but Linux
2637 * arch/arm/mach-pxa/sleep.S expects two instructions following
2638 * an MMU enable to execute from cache. Imitate this behaviour. */
2639 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2640 (insn & 0x0fff0fff) != 0x0e010f10)
2646 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2647 #define VFP_SREG(insn, bigbit, smallbit) \
2648 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2649 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2650 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2651 reg = (((insn) >> (bigbit)) & 0x0f) \
2652 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2654 if (insn & (1 << (smallbit))) \
2656 reg = ((insn) >> (bigbit)) & 0x0f; \
2659 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2660 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2661 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2662 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2663 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2664 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
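/* A note on the register fields decoded by the macros above: a single-precision
   register number is 5 bits split between a 4-bit field and one extra bit,
   recombined as (field << 1) | extra, e.g. field 0b0101 with the extra bit set
   gives S11.  For double precision the extra bit is only meaningful with VFP3,
   where it supplies bit 4 of the register number (D16-D31); the same example
   then decodes to D21. */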
2666 /* Move between integer and VFP cores. */
2667 static TCGv gen_vfp_mrs(void)
2669 TCGv tmp = new_tmp();
2670 tcg_gen_mov_i32(tmp, cpu_F0s);
2674 static void gen_vfp_msr(TCGv tmp)
2676 tcg_gen_mov_i32(cpu_F0s, tmp);
2681 vfp_enabled(CPUState * env)
2683 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
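/* Bit 30 of FPEXC is the EN (enable) bit; while it is clear, only the
   fmxr/fmrx accesses to a few control registers are accepted below. */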
2686 static void gen_neon_dup_u8(TCGv var, int shift)
2688 TCGv tmp = new_tmp();
2690 tcg_gen_shri_i32(var, var, shift);
2691 tcg_gen_ext8u_i32(var, var);
2692 tcg_gen_shli_i32(tmp, var, 8);
2693 tcg_gen_or_i32(var, var, tmp);
2694 tcg_gen_shli_i32(tmp, var, 16);
2695 tcg_gen_or_i32(var, var, tmp);
2699 static void gen_neon_dup_low16(TCGv var)
2701 TCGv tmp = new_tmp();
2702 tcg_gen_ext16u_i32(var, var);
2703 tcg_gen_shli_i32(tmp, var, 16);
2704 tcg_gen_or_i32(var, var, tmp);
2708 static void gen_neon_dup_high16(TCGv var)
2710 TCGv tmp = new_tmp();
2711 tcg_gen_andi_i32(var, var, 0xffff0000);
2712 tcg_gen_shri_i32(tmp, var, 16);
2713 tcg_gen_or_i32(var, var, tmp);
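/* The three helpers above replicate a narrow value across a 32-bit word:
   gen_neon_dup_u8 turns 0x000000ab into 0xabababab (after shifting the selected
   byte down), gen_neon_dup_low16 turns 0x0000abcd into 0xabcdabcd, and
   gen_neon_dup_high16 copies the top halfword into both halves. */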
2717 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2718 (i.e. an undefined instruction). */
2719 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2721 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2726 if (!arm_feature(env, ARM_FEATURE_VFP))
2729 if (!vfp_enabled(env)) {
2730 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2731 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2733 rn = (insn >> 16) & 0xf;
2734 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2735 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2738 dp = ((insn & 0xf00) == 0xb00);
2739 switch ((insn >> 24) & 0xf) {
2741 if (insn & (1 << 4)) {
2742 /* single register transfer */
2743 rd = (insn >> 12) & 0xf;
2748 VFP_DREG_N(rn, insn);
2751 if (insn & 0x00c00060
2752 && !arm_feature(env, ARM_FEATURE_NEON))
2755 pass = (insn >> 21) & 1;
2756 if (insn & (1 << 22)) {
2758 offset = ((insn >> 5) & 3) * 8;
2759 } else if (insn & (1 << 5)) {
2761 offset = (insn & (1 << 6)) ? 16 : 0;
2766 if (insn & ARM_CP_RW_BIT) {
2768 tmp = neon_load_reg(rn, pass);
2772 tcg_gen_shri_i32(tmp, tmp, offset);
2773 if (insn & (1 << 23))
2779 if (insn & (1 << 23)) {
2781 tcg_gen_shri_i32(tmp, tmp, 16);
2787 tcg_gen_sari_i32(tmp, tmp, 16);
2796 store_reg(s, rd, tmp);
2799 tmp = load_reg(s, rd);
2800 if (insn & (1 << 23)) {
2803 gen_neon_dup_u8(tmp, 0);
2804 } else if (size == 1) {
2805 gen_neon_dup_low16(tmp);
2808 tcg_gen_mov_i32(tmp2, tmp);
2809 neon_store_reg(rn, 0, tmp2);
2810 neon_store_reg(rn, 1, tmp);
2815 tmp2 = neon_load_reg(rn, pass);
2816 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2820 tmp2 = neon_load_reg(rn, pass);
2821 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2827 neon_store_reg(rn, pass, tmp);
2831 if ((insn & 0x6f) != 0x00)
2833 rn = VFP_SREG_N(insn);
2834 if (insn & ARM_CP_RW_BIT) {
2836 if (insn & (1 << 21)) {
2837 /* system register */
2842 /* VFP2 allows access to FSID from userspace.
2843 VFP3 restricts all id registers to privileged
2846 && arm_feature(env, ARM_FEATURE_VFP3))
2848 tmp = load_cpu_field(vfp.xregs[rn]);
2853 tmp = load_cpu_field(vfp.xregs[rn]);
2855 case ARM_VFP_FPINST:
2856 case ARM_VFP_FPINST2:
2857 /* Not present in VFP3. */
2859 || arm_feature(env, ARM_FEATURE_VFP3))
2861 tmp = load_cpu_field(vfp.xregs[rn]);
2865 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2866 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2869 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2875 || !arm_feature(env, ARM_FEATURE_VFP3))
2877 tmp = load_cpu_field(vfp.xregs[rn]);
2883 gen_mov_F0_vreg(0, rn);
2884 tmp = gen_vfp_mrs();
2887 /* Set the 4 flag bits in the CPSR. */
2891 store_reg(s, rd, tmp);
2895 tmp = load_reg(s, rd);
2896 if (insn & (1 << 21)) {
2898 /* system register */
2903 /* Writes are ignored. */
2906 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2913 store_cpu_field(tmp, vfp.xregs[rn]);
2916 case ARM_VFP_FPINST:
2917 case ARM_VFP_FPINST2:
2918 store_cpu_field(tmp, vfp.xregs[rn]);
2925 gen_mov_vreg_F0(0, rn);
2930 /* data processing */
2931 /* The opcode is in bits 23, 21, 20 and 6. */
2932 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2936 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2938 /* rn is register number */
2939 VFP_DREG_N(rn, insn);
2942 if (op == 15 && (rn == 15 || rn > 17)) {
2943 /* Integer or single precision destination. */
2944 rd = VFP_SREG_D(insn);
2946 VFP_DREG_D(rd, insn);
2949 if (op == 15 && (rn == 16 || rn == 17)) {
2950 /* Integer source. */
2951 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2953 VFP_DREG_M(rm, insn);
2956 rn = VFP_SREG_N(insn);
2957 if (op == 15 && rn == 15) {
2958 /* Double precision destination. */
2959 VFP_DREG_D(rd, insn);
2961 rd = VFP_SREG_D(insn);
2963 rm = VFP_SREG_M(insn);
2966 veclen = env->vfp.vec_len;
2967 if (op == 15 && rn > 3)
2970 /* Shut up compiler warnings. */
2981 /* Figure out what type of vector operation this is. */
2982 if ((rd & bank_mask) == 0) {
2987 delta_d = (env->vfp.vec_stride >> 1) + 1;
2989 delta_d = env->vfp.vec_stride + 1;
2991 if ((rm & bank_mask) == 0) {
2992 /* mixed scalar/vector */
3001 /* Load the initial operands. */
3006 /* Integer source */
3007 gen_mov_F0_vreg(0, rm);
3012 gen_mov_F0_vreg(dp, rd);
3013 gen_mov_F1_vreg(dp, rm);
3017 /* Compare with zero */
3018 gen_mov_F0_vreg(dp, rd);
3025 /* Source and destination the same. */
3026 gen_mov_F0_vreg(dp, rd);
3029 /* One source operand. */
3030 gen_mov_F0_vreg(dp, rm);
3034 /* Two source operands. */
3035 gen_mov_F0_vreg(dp, rn);
3036 gen_mov_F1_vreg(dp, rm);
3040 /* Perform the calculation. */
3042 case 0: /* mac: fd + (fn * fm) */
3044 gen_mov_F1_vreg(dp, rd);
3047 case 1: /* nmac: fd - (fn * fm) */
3050 gen_mov_F1_vreg(dp, rd);
3053 case 2: /* msc: -fd + (fn * fm) */
3055 gen_mov_F1_vreg(dp, rd);
3058 case 3: /* nmsc: -fd - (fn * fm) */
3060 gen_mov_F1_vreg(dp, rd);
3064 case 4: /* mul: fn * fm */
3067 case 5: /* nmul: -(fn * fm) */
3071 case 6: /* add: fn + fm */
3074 case 7: /* sub: fn - fm */
3077 case 8: /* div: fn / fm */
3080 case 14: /* fconst */
3081 if (!arm_feature(env, ARM_FEATURE_VFP3))
3084 n = (insn << 12) & 0x80000000;
3085 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3092 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3099 tcg_gen_movi_i32(cpu_F0s, n);
3102 case 15: /* extension space */
3125 case 11: /* cmpez */
3129 case 15: /* single<->double conversion */
3131 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3133 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3135 case 16: /* fuito */
3138 case 17: /* fsito */
3141 case 20: /* fshto */
3142 if (!arm_feature(env, ARM_FEATURE_VFP3))
3144 gen_vfp_shto(dp, rm);
3146 case 21: /* fslto */
3147 if (!arm_feature(env, ARM_FEATURE_VFP3))
3149 gen_vfp_slto(dp, rm);
3151 case 22: /* fuhto */
3152 if (!arm_feature(env, ARM_FEATURE_VFP3))
3154 gen_vfp_uhto(dp, rm);
3156 case 23: /* fulto */
3157 if (!arm_feature(env, ARM_FEATURE_VFP3))
3159 gen_vfp_ulto(dp, rm);
3161 case 24: /* ftoui */
3164 case 25: /* ftouiz */
3167 case 26: /* ftosi */
3170 case 27: /* ftosiz */
3173 case 28: /* ftosh */
3174 if (!arm_feature(env, ARM_FEATURE_VFP3))
3176 gen_vfp_tosh(dp, rm);
3178 case 29: /* ftosl */
3179 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 gen_vfp_tosl(dp, rm);
3183 case 30: /* ftouh */
3184 if (!arm_feature(env, ARM_FEATURE_VFP3))
3186 gen_vfp_touh(dp, rm);
3188 case 31: /* ftoul */
3189 if (!arm_feature(env, ARM_FEATURE_VFP3))
3191 gen_vfp_toul(dp, rm);
3193 default: /* undefined */
3194 printf ("rn:%d\n", rn);
3198 default: /* undefined */
3199 printf ("op:%d\n", op);
3203 /* Write back the result. */
3204 if (op == 15 && (rn >= 8 && rn <= 11))
3205 ; /* Comparison, do nothing. */
3206 else if (op == 15 && rn > 17)
3207 /* Integer result. */
3208 gen_mov_vreg_F0(0, rd);
3209 else if (op == 15 && rn == 15)
3211 gen_mov_vreg_F0(!dp, rd);
3213 gen_mov_vreg_F0(dp, rd);
3215 /* break out of the loop if we have finished */
3219 if (op == 15 && delta_m == 0) {
3220 /* single source one-many */
3222 rd = ((rd + delta_d) & (bank_mask - 1))
3224 gen_mov_vreg_F0(dp, rd);
3228 /* Set up the next operands. */
3230 rd = ((rd + delta_d) & (bank_mask - 1))
3234 /* One source operand. */
3235 rm = ((rm + delta_m) & (bank_mask - 1))
3237 gen_mov_F0_vreg(dp, rm);
3239 /* Two source operands. */
3240 rn = ((rn + delta_d) & (bank_mask - 1))
3242 gen_mov_F0_vreg(dp, rn);
3244 rm = ((rm + delta_m) & (bank_mask - 1))
3246 gen_mov_F1_vreg(dp, rm);
3254 if (dp && (insn & 0x03e00000) == 0x00400000) {
3255 /* two-register transfer */
3256 rn = (insn >> 16) & 0xf;
3257 rd = (insn >> 12) & 0xf;
3259 VFP_DREG_M(rm, insn);
3261 rm = VFP_SREG_M(insn);
3264 if (insn & ARM_CP_RW_BIT) {
3267 gen_mov_F0_vreg(0, rm * 2);
3268 tmp = gen_vfp_mrs();
3269 store_reg(s, rd, tmp);
3270 gen_mov_F0_vreg(0, rm * 2 + 1);
3271 tmp = gen_vfp_mrs();
3272 store_reg(s, rn, tmp);
3274 gen_mov_F0_vreg(0, rm);
3275 tmp = gen_vfp_mrs();
3276 store_reg(s, rn, tmp);
3277 gen_mov_F0_vreg(0, rm + 1);
3278 tmp = gen_vfp_mrs();
3279 store_reg(s, rd, tmp);
3284 tmp = load_reg(s, rd);
3286 gen_mov_vreg_F0(0, rm * 2);
3287 tmp = load_reg(s, rn);
3289 gen_mov_vreg_F0(0, rm * 2 + 1);
3291 tmp = load_reg(s, rn);
3293 gen_mov_vreg_F0(0, rm);
3294 tmp = load_reg(s, rd);
3296 gen_mov_vreg_F0(0, rm + 1);
3301 rn = (insn >> 16) & 0xf;
3303 VFP_DREG_D(rd, insn);
3305 rd = VFP_SREG_D(insn);
3306 if (s->thumb && rn == 15) {
3307 gen_op_movl_T1_im(s->pc & ~2);
3309 gen_movl_T1_reg(s, rn);
3311 if ((insn & 0x01200000) == 0x01000000) {
3312 /* Single load/store */
3313 offset = (insn & 0xff) << 2;
3314 if ((insn & (1 << 23)) == 0)
3316 gen_op_addl_T1_im(offset);
3317 if (insn & (1 << 20)) {
3319 gen_mov_vreg_F0(dp, rd);
3321 gen_mov_F0_vreg(dp, rd);
3325 /* load/store multiple */
3327 n = (insn >> 1) & 0x7f;
3331 if (insn & (1 << 24)) /* pre-decrement */
3332 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3338 for (i = 0; i < n; i++) {
3339 if (insn & ARM_CP_RW_BIT) {
3342 gen_mov_vreg_F0(dp, rd + i);
3345 gen_mov_F0_vreg(dp, rd + i);
3348 gen_op_addl_T1_im(offset);
3350 if (insn & (1 << 21)) {
3352 if (insn & (1 << 24))
3353 offset = -offset * n;
3354 else if (dp && (insn & 1))
3360 gen_op_addl_T1_im(offset);
3361 gen_movl_reg_T1(s, rn);
3367 /* Should never happen. */
3373 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3375 TranslationBlock *tb;
3378 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3380 gen_set_pc_im(dest);
3381 tcg_gen_exit_tb((long)tb + n);
3383 gen_set_pc_im(dest);
3388 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3390 if (unlikely(s->singlestep_enabled)) {
3391 /* An indirect jump so that we still trigger the debug exception. */
3396 gen_goto_tb(s, 0, dest);
3397 s->is_jmp = DISAS_TB_JUMP;
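/* Direct block chaining: when the branch target lies in the same guest page as
   the current TB we emit a goto_tb/exit_tb pair so the two TBs can later be
   patched to jump to each other directly; otherwise, or when single-stepping,
   we only update the PC and return to the main loop so the new PC is looked up
   and any pending debug exception is taken. */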
3401 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3404 tcg_gen_sari_i32(t0, t0, 16);
3408 tcg_gen_sari_i32(t1, t1, 16);
3411 tcg_gen_mul_i32(t0, t0, t1);
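/* gen_mulxy implements the 16x16->32 signed multiply used by the SMULxy-style
   operations: x selects the top (x != 0) or bottom halfword of t0, y does the
   same for t1, and the selected halfwords are sign-extended and multiplied. */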
3414 /* Return the mask of PSR bits set by a MSR instruction. */
3415 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3419 if (flags & (1 << 0))
3421 if (flags & (1 << 1))
3423 if (flags & (1 << 2))
3425 if (flags & (1 << 3))
3428 /* Mask out undefined bits. */
3429 mask &= ~CPSR_RESERVED;
3430 if (!arm_feature(env, ARM_FEATURE_V6))
3431 mask &= ~(CPSR_E | CPSR_GE);
3432 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3434 /* Mask out execution state bits. */
3437 /* Mask out privileged bits. */
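/* The four low bits of the MSR field mask select the usual PSR byte fields:
   bit 0 = control (CPSR_c, bits 7:0), bit 1 = extension (CPSR_x, bits 15:8),
   bit 2 = status (CPSR_s, bits 23:16), bit 3 = flags (CPSR_f, bits 31:24).
   The mask is then trimmed to what the architecture version defines and, in
   user mode, to the bits a user-level MSR may actually change. */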
3443 /* Returns nonzero if access to the PSR is not permitted. */
3444 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3448 /* ??? This is also undefined in system mode. */
3452 tmp = load_cpu_field(spsr);
3453 tcg_gen_andi_i32(tmp, tmp, ~mask);
3454 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3455 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3456 store_cpu_field(tmp, spsr);
3458 gen_set_cpsr(cpu_T[0], mask);
3464 /* Generate an old-style exception return. */
3465 static void gen_exception_return(DisasContext *s)
3468 gen_movl_reg_T0(s, 15);
3469 tmp = load_cpu_field(spsr);
3470 gen_set_cpsr(tmp, 0xffffffff);
3472 s->is_jmp = DISAS_UPDATE;
3475 /* Generate a v6 exception return. Marks both values as dead. */
3476 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3478 gen_set_cpsr(cpsr, 0xffffffff);
3480 store_reg(s, 15, pc);
3481 s->is_jmp = DISAS_UPDATE;
3485 gen_set_condexec (DisasContext *s)
3487 if (s->condexec_mask) {
3488 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3489 TCGv tmp = new_tmp();
3490 tcg_gen_movi_i32(tmp, val);
3491 store_cpu_field(tmp, condexec_bits);
3495 static void gen_nop_hint(DisasContext *s, int val)
3499 gen_set_pc_im(s->pc);
3500 s->is_jmp = DISAS_WFI;
3504 /* TODO: Implement SEV and WFE. May help SMP performance. */
3510 /* These macros help make the code more readable when migrating from the
3511 old dyngen helpers. They should probably be removed when
3512 T0/T1 are removed. */
3513 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3514 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
3516 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3518 static inline int gen_neon_add(int size)
3521 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3522 case 1: gen_helper_neon_add_u16(CPU_T001); break;
3523 case 2: gen_op_addl_T0_T1(); break;
3529 static inline void gen_neon_rsb(int size)
3532 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3533 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3534 case 2: gen_op_rsbl_T0_T1(); break;
3539 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3540 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3541 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3542 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3543 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3545 /* FIXME: This is wrong. They set the wrong overflow bit. */
3546 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3547 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3548 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3549 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
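/* The GEN_NEON_INTEGER_OP and GEN_NEON_INTEGER_OP_ENV macros below expand to a
   switch on ((size << 1) | u) and pick the signed or unsigned 8/16/32-bit
   helper for the named operation, working on T0 and T1 with the result left in
   T0; the _ENV variant also passes cpu_env for helpers that need CPU state,
   e.g. for saturation.  size == 3 hits the default case and makes the caller
   return 1 (undefined instruction). */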
3551 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3552 switch ((size << 1) | u) { \
3554 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3557 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3560 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3563 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3566 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3569 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3571 default: return 1; \
3574 #define GEN_NEON_INTEGER_OP(name) do { \
3575 switch ((size << 1) | u) { \
3577 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3580 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3583 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3586 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3589 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3592 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3594 default: return 1; \
3598 gen_neon_movl_scratch_T0(int scratch)
3602 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3603 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
3607 gen_neon_movl_scratch_T1(int scratch)
3611 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3612 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
3616 gen_neon_movl_T0_scratch(int scratch)
3620 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3621 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
3625 gen_neon_movl_T1_scratch(int scratch)
3629 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3630 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
3633 static inline void gen_neon_get_scalar(int size, int reg)
3636 NEON_GET_REG(T0, reg >> 1, reg & 1);
3638 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3640 gen_neon_dup_low16(cpu_T[0]);
3642 gen_neon_dup_high16(cpu_T[0]);
3646 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3650 for (n = 0; n < q + 1; n += 2) {
3651 NEON_GET_REG(T0, reg, n);
3652 NEON_GET_REG(T1, reg, n + 1);
3654 case 0: gen_helper_neon_unzip_u8(); break;
3655 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
3656 case 2: /* no-op */; break;
3659 gen_neon_movl_scratch_T0(tmp + n);
3660 gen_neon_movl_scratch_T1(tmp + n + 1);
3668 } neon_ls_element_type[11] = {
3682 /* Translate a NEON load/store element instruction. Return nonzero if the
3683 instruction is invalid. */
3684 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3700 if (!vfp_enabled(env))
3702 VFP_DREG_D(rd, insn);
3703 rn = (insn >> 16) & 0xf;
3705 load = (insn & (1 << 21)) != 0;
3706 if ((insn & (1 << 23)) == 0) {
3707 /* Load store all elements. */
3708 op = (insn >> 8) & 0xf;
3709 size = (insn >> 6) & 3;
3710 if (op > 10 || size == 3)
3712 nregs = neon_ls_element_type[op].nregs;
3713 interleave = neon_ls_element_type[op].interleave;
3714 gen_movl_T1_reg(s, rn);
3715 stride = (1 << size) * interleave;
3716 for (reg = 0; reg < nregs; reg++) {
3717 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3718 gen_movl_T1_reg(s, rn);
3719 gen_op_addl_T1_im((1 << size) * reg);
3720 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3721 gen_movl_T1_reg(s, rn);
3722 gen_op_addl_T1_im(1 << size);
3724 for (pass = 0; pass < 2; pass++) {
3727 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3728 neon_store_reg(rd, pass, tmp);
3730 tmp = neon_load_reg(rd, pass);
3731 gen_st32(tmp, cpu_T[1], IS_USER(s));
3733 gen_op_addl_T1_im(stride);
3734 } else if (size == 1) {
3736 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3737 gen_op_addl_T1_im(stride);
3738 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
3739 gen_op_addl_T1_im(stride);
3740 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3742 neon_store_reg(rd, pass, tmp);
3744 tmp = neon_load_reg(rd, pass);
3746 tcg_gen_shri_i32(tmp2, tmp, 16);
3747 gen_st16(tmp, cpu_T[1], IS_USER(s));
3748 gen_op_addl_T1_im(stride);
3749 gen_st16(tmp2, cpu_T[1], IS_USER(s));
3750 gen_op_addl_T1_im(stride);
3752 } else /* size == 0 */ {
3755 for (n = 0; n < 4; n++) {
3756 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3757 gen_op_addl_T1_im(stride);
3761 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3765 neon_store_reg(rd, pass, tmp2);
3767 tmp2 = neon_load_reg(rd, pass);
3768 for (n = 0; n < 4; n++) {
3771 tcg_gen_mov_i32(tmp, tmp2);
3773 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3775 gen_st8(tmp, cpu_T[1], IS_USER(s));
3776 gen_op_addl_T1_im(stride);
3782 rd += neon_ls_element_type[op].spacing;
3786 size = (insn >> 10) & 3;
3788 /* Load single element to all lanes. */
3791 size = (insn >> 6) & 3;
3792 nregs = ((insn >> 8) & 3) + 1;
3793 stride = (insn & (1 << 5)) ? 2 : 1;
3794 gen_movl_T1_reg(s, rn);
3795 for (reg = 0; reg < nregs; reg++) {
3798 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3799 gen_neon_dup_u8(tmp, 0);
3802 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3803 gen_neon_dup_low16(tmp);
3806 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3810 default: /* Avoid compiler warnings. */
3813 gen_op_addl_T1_im(1 << size);
3815 tcg_gen_mov_i32(tmp2, tmp);
3816 neon_store_reg(rd, 0, tmp2);
3817 neon_store_reg(rd, 1, tmp);
3820 stride = (1 << size) * nregs;
3822 /* Single element. */
3823 pass = (insn >> 7) & 1;
3826 shift = ((insn >> 5) & 3) * 8;
3830 shift = ((insn >> 6) & 1) * 16;
3831 stride = (insn & (1 << 5)) ? 2 : 1;
3835 stride = (insn & (1 << 6)) ? 2 : 1;
3840 nregs = ((insn >> 8) & 3) + 1;
3841 gen_movl_T1_reg(s, rn);
3842 for (reg = 0; reg < nregs; reg++) {
3846 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3849 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3852 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3854 default: /* Avoid compiler warnings. */
3858 tmp2 = neon_load_reg(rd, pass);
3859 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3862 neon_store_reg(rd, pass, tmp);
3863 } else { /* Store */
3864 tmp = neon_load_reg(rd, pass);
3866 tcg_gen_shri_i32(tmp, tmp, shift);
3869 gen_st8(tmp, cpu_T[1], IS_USER(s));
3872 gen_st16(tmp, cpu_T[1], IS_USER(s));
3875 gen_st32(tmp, cpu_T[1], IS_USER(s));
3880 gen_op_addl_T1_im(1 << size);
3882 stride = nregs * (1 << size);
3888 base = load_reg(s, rn);
3890 tcg_gen_addi_i32(base, base, stride);
3893 index = load_reg(s, rm);
3894 tcg_gen_add_i32(base, base, index);
3897 store_reg(s, rn, base);
3902 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3903 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3905 tcg_gen_and_i32(t, t, c);
3906 tcg_gen_bic_i32(f, f, c);
3907 tcg_gen_or_i32(dest, t, f);
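/* Bitwise select: the result is (t & c) | (f & ~c), i.e. each result bit is
   taken from t where the corresponding bit of c is set and from f where it is
   clear.  VBSL, VBIT and VBIF are all implemented with this by permuting which
   operand plays the role of t, f and c. */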
3910 static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
3913 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3914 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3915 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3920 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
3923 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3924 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3925 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3930 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
3933 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3934 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3935 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3940 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3946 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3947 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3952 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3953 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3960 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3961 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3966 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3967 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3974 static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
3978 case 0: gen_helper_neon_widen_u8(dest, src); break;
3979 case 1: gen_helper_neon_widen_u16(dest, src); break;
3980 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3985 case 0: gen_helper_neon_widen_s8(dest, src); break;
3986 case 1: gen_helper_neon_widen_s16(dest, src); break;
3987 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3994 static inline void gen_neon_addl(int size)
3997 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3998 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3999 case 2: tcg_gen_add_i64(CPU_V001); break;
4004 static inline void gen_neon_subl(int size)
4007 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4008 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4009 case 2: tcg_gen_sub_i64(CPU_V001); break;
4014 static inline void gen_neon_negl(TCGv var, int size)
4017 case 0: gen_helper_neon_negl_u16(var, var); break;
4018 case 1: gen_helper_neon_negl_u32(var, var); break;
4019 case 2: gen_helper_neon_negl_u64(var, var); break;
4024 static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
4027 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4028 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4033 static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
4037 switch ((size << 1) | u) {
4038 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4039 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4040 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4041 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4043 tmp = gen_muls_i64_i32(a, b);
4044 tcg_gen_mov_i64(dest, tmp);
4047 tmp = gen_mulu_i64_i32(a, b);
4048 tcg_gen_mov_i64(dest, tmp);
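/* Widening multiply: 8- and 16-bit elements use the dedicated Neon helpers,
   which multiply two packed 32-bit inputs into a 64-bit result; 32-bit
   elements use a plain 32x32->64 multiply (gen_muls_i64_i32 for signed,
   gen_mulu_i64_i32 for unsigned) and copy the product into dest. */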
4058 /* Translate a NEON data processing instruction. Return nonzero if the
4059 instruction is invalid.
4060 We process data in a mixture of 32-bit and 64-bit chunks.
4061 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4063 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4080 if (!vfp_enabled(env))
4082 q = (insn & (1 << 6)) != 0;
4083 u = (insn >> 24) & 1;
4084 VFP_DREG_D(rd, insn);
4085 VFP_DREG_N(rn, insn);
4086 VFP_DREG_M(rm, insn);
4087 size = (insn >> 20) & 3;
4088 if ((insn & (1 << 23)) == 0) {
4089 /* Three register same length. */
4090 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4091 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4092 || op == 10 || op == 11 || op == 16)) {
4093 /* 64-bit element instructions. */
4094 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4095 neon_load_reg64(cpu_V0, rn + pass);
4096 neon_load_reg64(cpu_V1, rm + pass);
4100 gen_helper_neon_add_saturate_u64(CPU_V001);
4102 gen_helper_neon_add_saturate_s64(CPU_V001);
4107 gen_helper_neon_sub_saturate_u64(CPU_V001);
4109 gen_helper_neon_sub_saturate_s64(CPU_V001);
4114 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4116 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4121 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4124 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4128 case 10: /* VRSHL */
4130 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4132 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4135 case 11: /* VQRSHL */
4137 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4140 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4146 tcg_gen_sub_i64(CPU_V001);
4148 tcg_gen_add_i64(CPU_V001);
4154 neon_store_reg64(cpu_V0, rd + pass);
4161 case 10: /* VRSHL */
4162 case 11: /* VQRSHL */
4165 /* Shift instruction operands are reversed. */
4172 case 20: /* VPMAX */
4173 case 21: /* VPMIN */
4174 case 23: /* VPADD */
4177 case 26: /* VPADD (float) */
4178 pairwise = (u && size < 2);
4180 case 30: /* VPMIN/VPMAX (float) */
4187 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4196 NEON_GET_REG(T0, rn, n);
4197 NEON_GET_REG(T1, rn, n + 1);
4199 NEON_GET_REG(T0, rm, n);
4200 NEON_GET_REG(T1, rm, n + 1);
4204 NEON_GET_REG(T0, rn, pass);
4205 NEON_GET_REG(T1, rm, pass);
4209 GEN_NEON_INTEGER_OP(hadd);
4212 GEN_NEON_INTEGER_OP_ENV(qadd);
4214 case 2: /* VRHADD */
4215 GEN_NEON_INTEGER_OP(rhadd);
4217 case 3: /* Logic ops. */
4218 switch ((u << 2) | size) {
4220 gen_op_andl_T0_T1();
4223 gen_op_bicl_T0_T1();
4233 gen_op_xorl_T0_T1();
4236 tmp = neon_load_reg(rd, pass);
4237 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4241 tmp = neon_load_reg(rd, pass);
4242 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4246 tmp = neon_load_reg(rd, pass);
4247 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4253 GEN_NEON_INTEGER_OP(hsub);
4256 GEN_NEON_INTEGER_OP_ENV(qsub);
4259 GEN_NEON_INTEGER_OP(cgt);
4262 GEN_NEON_INTEGER_OP(cge);
4265 GEN_NEON_INTEGER_OP(shl);
4268 GEN_NEON_INTEGER_OP_ENV(qshl);
4270 case 10: /* VRSHL */
4271 GEN_NEON_INTEGER_OP(rshl);
4273 case 11: /* VQRSHL */
4274 GEN_NEON_INTEGER_OP_ENV(qrshl);
4277 GEN_NEON_INTEGER_OP(max);
4280 GEN_NEON_INTEGER_OP(min);
4283 GEN_NEON_INTEGER_OP(abd);
4286 GEN_NEON_INTEGER_OP(abd);
4287 NEON_GET_REG(T1, rd, pass);
4291 if (!u) { /* VADD */
4292 if (gen_neon_add(size))
4296 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4297 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
4298 case 2: gen_op_subl_T0_T1(); break;
4304 if (!u) { /* VTST */
4306 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4307 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4308 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
4313 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4314 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4315 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
4320 case 18: /* Multiply. */
4322 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4323 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4324 case 2: gen_op_mul_T0_T1(); break;
4327 NEON_GET_REG(T1, rd, pass);
4335 if (u) { /* polynomial */
4336 gen_helper_neon_mul_p8(CPU_T001);
4337 } else { /* Integer */
4339 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4340 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4341 case 2: gen_op_mul_T0_T1(); break;
4346 case 20: /* VPMAX */
4347 GEN_NEON_INTEGER_OP(pmax);
4349 case 21: /* VPMIN */
4350 GEN_NEON_INTEGER_OP(pmin);
4352 case 22: /* Multiply high. */
4353 if (!u) { /* VQDMULH */
4355 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4356 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
4359 } else { /* VQRDMULH */
4361 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4362 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
4367 case 23: /* VPADD */
4371 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4372 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
4373 case 2: gen_op_addl_T0_T1(); break;
4377 case 26: /* Floating point arithmetic. */
4378 switch ((u << 2) | size) {
4380 gen_helper_neon_add_f32(CPU_T001);
4383 gen_helper_neon_sub_f32(CPU_T001);
4386 gen_helper_neon_add_f32(CPU_T001);
4389 gen_helper_neon_abd_f32(CPU_T001);
4395 case 27: /* Float multiply. */
4396 gen_helper_neon_mul_f32(CPU_T001);
4398 NEON_GET_REG(T1, rd, pass);
4400 gen_helper_neon_add_f32(CPU_T001);
4402 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
4406 case 28: /* Float compare. */
4408 gen_helper_neon_ceq_f32(CPU_T001);
4411 gen_helper_neon_cge_f32(CPU_T001);
4413 gen_helper_neon_cgt_f32(CPU_T001);
4416 case 29: /* Float compare absolute. */
4420 gen_helper_neon_acge_f32(CPU_T001);
4422 gen_helper_neon_acgt_f32(CPU_T001);
4424 case 30: /* Float min/max. */
4426 gen_helper_neon_max_f32(CPU_T001);
4428 gen_helper_neon_min_f32(CPU_T001);
4432 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4434 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4439 /* Save the result. For elementwise operations we can put it
4440 straight into the destination register. For pairwise operations
4441 we have to be careful to avoid clobbering the source operands. */
4442 if (pairwise && rd == rm) {
4443 gen_neon_movl_scratch_T0(pass);
4445 NEON_SET_REG(T0, rd, pass);
4449 if (pairwise && rd == rm) {
4450 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4451 gen_neon_movl_T0_scratch(pass);
4452 NEON_SET_REG(T0, rd, pass);
4455 /* End of 3 register same size operations. */
4456 } else if (insn & (1 << 4)) {
4457 if ((insn & 0x00380080) != 0) {
4458 /* Two registers and shift. */
4459 op = (insn >> 8) & 0xf;
4460 if (insn & (1 << 7)) {
4465 while ((insn & (1 << (size + 19))) == 0)
4468 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4469 /* To avoid excessive duplication of ops we implement shift
4470 by immediate using the variable shift operations. */
4472 /* Shift by immediate:
4473 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4474 /* Right shifts are encoded as N - shift, where N is the
4475 element size in bits. */
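/* For example, an 8-bit VSHR by 3 is encoded so that the extracted field is
   8 - 3 = 5; the subtraction below then yields -3, and the variable shift
   helpers used further down treat a negative count as a right shift by that
   amount. */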
4477 shift = shift - (1 << (size + 3));
4485 imm = (uint8_t) shift;
4490 imm = (uint16_t) shift;
4501 for (pass = 0; pass < count; pass++) {
4503 neon_load_reg64(cpu_V0, rm + pass);
4504 tcg_gen_movi_i64(cpu_V1, imm);
4509 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4511 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4516 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4518 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4523 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4525 case 5: /* VSHL, VSLI */
4526 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4530 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4532 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4534 case 7: /* VQSHLU */
4535 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4538 if (op == 1 || op == 3) {
4540 neon_load_reg64(cpu_V0, rd + pass);
4541 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4542 } else if (op == 4 || (op == 5 && u)) {
4544 cpu_abort(env, "VS[LR]I.64 not implemented");
4546 neon_store_reg64(cpu_V0, rd + pass);
4547 } else { /* size < 3 */
4548 /* Operands in T0 and T1. */
4549 gen_op_movl_T1_im(imm);
4550 NEON_GET_REG(T0, rm, pass);
4554 GEN_NEON_INTEGER_OP(shl);
4558 GEN_NEON_INTEGER_OP(rshl);
4563 GEN_NEON_INTEGER_OP(shl);
4565 case 5: /* VSHL, VSLI */
4567 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4568 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4569 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4574 GEN_NEON_INTEGER_OP_ENV(qshl);
4576 case 7: /* VQSHLU */
4578 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4579 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4580 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4586 if (op == 1 || op == 3) {
4588 NEON_GET_REG(T1, rd, pass);
4590 } else if (op == 4 || (op == 5 && u)) {
4595 imm = 0xff >> -shift;
4597 imm = (uint8_t)(0xff << shift);
4603 imm = 0xffff >> -shift;
4605 imm = (uint16_t)(0xffff << shift);
4610 imm = 0xffffffffu >> -shift;
4612 imm = 0xffffffffu << shift;
4617 tmp = neon_load_reg(rd, pass);
4618 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4619 tcg_gen_andi_i32(tmp, tmp, ~imm);
4620 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4622 NEON_SET_REG(T0, rd, pass);
4625 } else if (op < 10) {
4626 /* Shift by immediate and narrow:
4627 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4628 shift = shift - (1 << (size + 3));
4632 imm = (uint16_t)shift;
4634 tmp2 = tcg_const_i32(imm);
4637 imm = (uint32_t)shift;
4638 tmp2 = tcg_const_i32(imm);
4640 tmp2 = tcg_const_i64(shift);
4646 for (pass = 0; pass < 2; pass++) {
4648 neon_load_reg64(cpu_V0, rm + pass);
4651 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
4653 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
4656 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
4658 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
4661 tmp = neon_load_reg(rm + pass, 0);
4662 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4663 tmp3 = neon_load_reg(rm + pass, 1);
4664 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4665 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4670 if (op == 8 && !u) {
4671 gen_neon_narrow(size - 1, tmp, cpu_V0);
4674 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4676 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4681 neon_store_reg(rd, 0, tmp2);
4682 neon_store_reg(rd, 1, tmp);
4685 } else if (op == 10) {
4689 tmp = neon_load_reg(rm, 0);
4690 tmp2 = neon_load_reg(rm, 1);
4691 for (pass = 0; pass < 2; pass++) {
4695 gen_neon_widen(cpu_V0, tmp, size, u);
4698 /* The shift is less than the width of the source
4699 type, so we can just shift the whole register. */
4700 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4701 if (size < 2 || !u) {
4704 imm = (0xffu >> (8 - shift));
4707 imm = 0xffff >> (16 - shift);
4709 imm64 = imm | (((uint64_t)imm) << 32);
4710 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
4713 neon_store_reg64(cpu_V0, rd + pass);
4715 } else if (op == 15 || op == 16) {
4716 /* VCVT fixed-point. */
4717 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4718 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4721 gen_vfp_ulto(0, shift);
4723 gen_vfp_slto(0, shift);
4726 gen_vfp_toul(0, shift);
4728 gen_vfp_tosl(0, shift);
4730 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4735 } else { /* (insn & 0x00380080) == 0 */
4738 op = (insn >> 8) & 0xf;
4739 /* One register and immediate. */
4740 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4741 invert = (insn & (1 << 5)) != 0;
4759 imm = (imm << 8) | (imm << 24);
4762 imm = (imm << 8) | 0xff;
4765 imm = (imm << 16) | 0xffff;
4768 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4773 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4774 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
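/* The expansion just above is the floating-point form of the modified
   immediate: the 8-bit value abcdefgh becomes sign = a, exponent =
   NOT(b):b:b:b:b:b:c:d and fraction = efgh followed by zeros.  For example
   imm = 0x70 expands to 0x3f800000, i.e. 1.0f. */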
4780 if (op != 14 || !invert)
4781 gen_op_movl_T1_im(imm);
4783 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4784 if (op & 1 && op < 12) {
4785 tmp = neon_load_reg(rd, pass);
4787 /* The immediate value has already been inverted, so
4789 tcg_gen_andi_i32(tmp, tmp, imm);
4791 tcg_gen_ori_i32(tmp, tmp, imm);
4796 if (op == 14 && invert) {
4799 for (n = 0; n < 4; n++) {
4800 if (imm & (1 << (n + (pass & 1) * 4)))
4801 val |= 0xff << (n * 8);
4803 tcg_gen_movi_i32(tmp, val);
4805 tcg_gen_movi_i32(tmp, imm);
4808 neon_store_reg(rd, pass, tmp);
4811 } else { /* (insn & 0x00800010 == 0x00800000) */
4813 op = (insn >> 8) & 0xf;
4814 if ((insn & (1 << 6)) == 0) {
4815 /* Three registers of different lengths. */
4819 /* prewiden, src1_wide, src2_wide */
4820 static const int neon_3reg_wide[16][3] = {
4821 {1, 0, 0}, /* VADDL */
4822 {1, 1, 0}, /* VADDW */
4823 {1, 0, 0}, /* VSUBL */
4824 {1, 1, 0}, /* VSUBW */
4825 {0, 1, 1}, /* VADDHN */
4826 {0, 0, 0}, /* VABAL */
4827 {0, 1, 1}, /* VSUBHN */
4828 {0, 0, 0}, /* VABDL */
4829 {0, 0, 0}, /* VMLAL */
4830 {0, 0, 0}, /* VQDMLAL */
4831 {0, 0, 0}, /* VMLSL */
4832 {0, 0, 0}, /* VQDMLSL */
4833 {0, 0, 0}, /* Integer VMULL */
4834 {0, 0, 0}, /* VQDMULL */
4835 {0, 0, 0} /* Polynomial VMULL */
4838 prewiden = neon_3reg_wide[op][0];
4839 src1_wide = neon_3reg_wide[op][1];
4840 src2_wide = neon_3reg_wide[op][2];
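/* Each table entry is {prewiden, src1_wide, src2_wide}: prewiden means the
   narrow source elements are widened before the operation (the L and W forms),
   src1_wide/src2_wide mark operands that are already 64 bits per element
   (the W forms and the high-half narrowing VADDHN/VSUBHN).  For example VADDW
   is {1, 1, 0}: a wide first operand plus a narrow second operand that gets
   widened. */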
4842 if (size == 0 && (op == 9 || op == 11 || op == 13))
4845 /* Avoid overlapping operands. Wide source operands are
4846 always aligned so will never overlap with wide
4847 destinations in problematic ways. */
4848 if (rd == rm && !src2_wide) {
4849 NEON_GET_REG(T0, rm, 1);
4850 gen_neon_movl_scratch_T0(2);
4851 } else if (rd == rn && !src1_wide) {
4852 NEON_GET_REG(T0, rn, 1);
4853 gen_neon_movl_scratch_T0(2);
4856 for (pass = 0; pass < 2; pass++) {
4858 neon_load_reg64(cpu_V0, rn + pass);
4861 if (pass == 1 && rd == rn) {
4862 gen_neon_movl_T0_scratch(2);
4864 tcg_gen_mov_i32(tmp, cpu_T[0]);
4866 tmp = neon_load_reg(rn, pass);
4869 gen_neon_widen(cpu_V0, tmp, size, u);
4873 neon_load_reg64(cpu_V1, rm + pass);
4876 if (pass == 1 && rd == rm) {
4877 gen_neon_movl_T0_scratch(2);
4879 tcg_gen_mov_i32(tmp2, cpu_T[0]);
4881 tmp2 = neon_load_reg(rm, pass);
4884 gen_neon_widen(cpu_V1, tmp2, size, u);
4888 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4889 gen_neon_addl(size);
4891 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
4892 gen_neon_subl(size);
4894 case 5: case 7: /* VABAL, VABDL */
4895 switch ((size << 1) | u) {
4897 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4900 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4903 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4906 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4909 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4912 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4919 case 8: case 9: case 10: case 11: case 12: case 13:
4920 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4921 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
4923 case 14: /* Polynomial VMULL */
4924 cpu_abort(env, "Polynomial VMULL not implemented");
4926 default: /* 15 is RESERVED. */
4929 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4931 if (op == 10 || op == 11) {
4932 gen_neon_negl(cpu_V0, size);
4936 neon_load_reg64(cpu_V1, rd + pass);
4940 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4941 gen_neon_addl(size);
4943 case 9: case 11: /* VQDMLAL, VQDMLSL */
4944 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4945 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4948 case 13: /* VQDMULL */
4949 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4954 neon_store_reg64(cpu_V0, rd + pass);
4955 } else if (op == 4 || op == 6) {
4956 /* Narrowing operation. */
4961 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4964 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4967 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4968 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4975 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4978 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4981 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4982 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4983 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4991 neon_store_reg(rd, 0, tmp3);
4992 neon_store_reg(rd, 1, tmp);
4995 /* Write back the result. */
4996 neon_store_reg64(cpu_V0, rd + pass);
5000 /* Two registers and a scalar. */
5002 case 0: /* Integer VMLA scalar */
5003 case 1: /* Float VMLA scalar */
5004 case 4: /* Integer VMLS scalar */
5005 case 5: /* Floating point VMLS scalar */
5006 case 8: /* Integer VMUL scalar */
5007 case 9: /* Floating point VMUL scalar */
5008 case 12: /* VQDMULH scalar */
5009 case 13: /* VQRDMULH scalar */
5010 gen_neon_get_scalar(size, rm);
5011 gen_neon_movl_scratch_T0(0);
5012 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5014 gen_neon_movl_T0_scratch(0);
5015 NEON_GET_REG(T1, rn, pass);
5018 gen_helper_neon_qdmulh_s16(CPU_T0E01);
5020 gen_helper_neon_qdmulh_s32(CPU_T0E01);
5022 } else if (op == 13) {
5024 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
5026 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
5028 } else if (op & 1) {
5029 gen_helper_neon_mul_f32(CPU_T001);
5032 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5033 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
5034 case 2: gen_op_mul_T0_T1(); break;
5040 NEON_GET_REG(T1, rd, pass);
5046 gen_helper_neon_add_f32(CPU_T001);
5052 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
5058 NEON_SET_REG(T0, rd, pass);
5061 case 2: /* VMLAL scalar */
5062 case 3: /* VQDMLAL scalar */
5063 case 6: /* VMLSL scalar */
5064 case 7: /* VQDMLSL scalar */
5065 case 10: /* VMULL scalar */
5066 case 11: /* VQDMULL scalar */
5067 if (size == 0 && (op == 3 || op == 7 || op == 11))
5070 gen_neon_get_scalar(size, rm);
5071 NEON_GET_REG(T1, rn, 1);
5073 for (pass = 0; pass < 2; pass++) {
5075 tmp = neon_load_reg(rn, 0);
5078 tcg_gen_mov_i32(tmp, cpu_T[1]);
5081 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5082 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5083 if (op == 6 || op == 7) {
5084 gen_neon_negl(cpu_V0, size);
5087 neon_load_reg64(cpu_V1, rd + pass);
5091 gen_neon_addl(size);
5094 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5095 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5101 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5106 neon_store_reg64(cpu_V0, rd + pass);
5109 default: /* 14 and 15 are RESERVED */
5113 } else { /* size == 3 */
5116 imm = (insn >> 8) & 0xf;
5123 neon_load_reg64(cpu_V0, rn);
5125 neon_load_reg64(cpu_V1, rn + 1);
5127 } else if (imm == 8) {
5128 neon_load_reg64(cpu_V0, rn + 1);
5130 neon_load_reg64(cpu_V1, rm);
5133 tmp = tcg_temp_new(TCG_TYPE_I64);
5135 neon_load_reg64(cpu_V0, rn);
5136 neon_load_reg64(tmp, rn + 1);
5138 neon_load_reg64(cpu_V0, rn + 1);
5139 neon_load_reg64(tmp, rm);
5141 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5142 tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
5143 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5145 neon_load_reg64(cpu_V1, rm);
5147 neon_load_reg64(cpu_V1, rm + 1);
5150 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5151 tcg_gen_shri_i64(tmp, tmp, imm * 8);
5152 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
5154 neon_load_reg64(cpu_V0, rn);
5155 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5156 neon_load_reg64(cpu_V1, rm);
5157 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5158 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5160 neon_store_reg64(cpu_V0, rd);
5162 neon_store_reg64(cpu_V1, rd + 1);
5164 } else if ((insn & (1 << 11)) == 0) {
5165 /* Two register misc. */
5166 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5167 size = (insn >> 18) & 3;
5169 case 0: /* VREV64 */
5172 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5173 NEON_GET_REG(T0, rm, pass * 2);
5174 NEON_GET_REG(T1, rm, pass * 2 + 1);
5176 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
5177 case 1: gen_swap_half(cpu_T[0]); break;
5178 case 2: /* no-op */ break;
5181 NEON_SET_REG(T0, rd, pass * 2 + 1);
5183 NEON_SET_REG(T1, rd, pass * 2);
5185 gen_op_movl_T0_T1();
5187 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
5188 case 1: gen_swap_half(cpu_T[0]); break;
5191 NEON_SET_REG(T0, rd, pass * 2);
5195 case 4: case 5: /* VPADDL */
5196 case 12: case 13: /* VPADAL */
5199 for (pass = 0; pass < q + 1; pass++) {
5200 tmp = neon_load_reg(rm, pass * 2);
5201 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5202 tmp = neon_load_reg(rm, pass * 2 + 1);
5203 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5205 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5206 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5207 case 2: tcg_gen_add_i64(CPU_V001); break;
5212 neon_load_reg64(cpu_V1, rd + pass);
5213 gen_neon_addl(size);
5215 neon_store_reg64(cpu_V0, rd + pass);
5220 for (n = 0; n < (q ? 4 : 2); n += 2) {
5221 NEON_GET_REG(T0, rm, n);
5222 NEON_GET_REG(T1, rd, n + 1);
5223 NEON_SET_REG(T1, rm, n);
5224 NEON_SET_REG(T0, rd, n + 1);
5232 Rd A3 A2 A1 A0 B2 B0 A2 A0
5233 Rm B3 B2 B1 B0 B3 B1 A3 A1
5237 gen_neon_unzip(rd, q, 0, size);
5238 gen_neon_unzip(rm, q, 4, size);
5240 static int unzip_order_q[8] =
5241 {0, 2, 4, 6, 1, 3, 5, 7};
5242 for (n = 0; n < 8; n++) {
5243 int reg = (n < 4) ? rd : rm;
5244 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5245 NEON_SET_REG(T0, reg, n % 4);
5248 static int unzip_order[4] =
5250 for (n = 0; n < 4; n++) {
5251 int reg = (n < 2) ? rd : rm;
5252 gen_neon_movl_T0_scratch(unzip_order[n]);
5253 NEON_SET_REG(T0, reg, n % 2);
5259 Rd A3 A2 A1 A0 B1 A1 B0 A0
5260 Rm B3 B2 B1 B0 B3 A3 B2 A2
5264 count = (q ? 4 : 2);
5265 for (n = 0; n < count; n++) {
5266 NEON_GET_REG(T0, rd, n);
5267 NEON_GET_REG(T1, rm, n);
5269 case 0: gen_helper_neon_zip_u8(); break;
5270 case 1: gen_helper_neon_zip_u16(); break;
5271 case 2: /* no-op */; break;
5274 gen_neon_movl_scratch_T0(n * 2);
5275 gen_neon_movl_scratch_T1(n * 2 + 1);
5277 for (n = 0; n < count * 2; n++) {
5278 int reg = (n < count) ? rd : rm;
5279 gen_neon_movl_T0_scratch(n);
5280 NEON_SET_REG(T0, reg, n % count);
5283 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5287 for (pass = 0; pass < 2; pass++) {
5288 neon_load_reg64(cpu_V0, rm + pass);
5290 if (op == 36 && q == 0) {
5291 gen_neon_narrow(size, tmp, cpu_V0);
5293 gen_neon_narrow_satu(size, tmp, cpu_V0);
5295 gen_neon_narrow_sats(size, tmp, cpu_V0);
5300 neon_store_reg(rd, 0, tmp2);
5301 neon_store_reg(rd, 1, tmp);
5305 case 38: /* VSHLL */
5308 tmp = neon_load_reg(rm, 0);
5309 tmp2 = neon_load_reg(rm, 1);
5310 for (pass = 0; pass < 2; pass++) {
5313 gen_neon_widen(cpu_V0, tmp, size, 1);
5314 neon_store_reg64(cpu_V0, rd + pass);
5319 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5320 if (op == 30 || op == 31 || op >= 58) {
5321 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5322 neon_reg_offset(rm, pass));
5324 NEON_GET_REG(T0, rm, pass);
5327 case 1: /* VREV32 */
5329 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
5330 case 1: gen_swap_half(cpu_T[0]); break;
5334 case 2: /* VREV16 */
5337 gen_rev16(cpu_T[0]);
5341 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5342 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5343 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
5349 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5350 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
5351 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
5358 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
5365 case 14: /* VQABS */
5367 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5368 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5369 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5373 case 15: /* VQNEG */
5375 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5376 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5377 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5381 case 16: case 19: /* VCGT #0, VCLE #0 */
5382 gen_op_movl_T1_im(0);
5384 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5385 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5386 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
5392 case 17: case 20: /* VCGE #0, VCLT #0 */
5393 gen_op_movl_T1_im(0);
5395 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5396 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5397 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
5403 case 18: /* VCEQ #0 */
5404 gen_op_movl_T1_im(0);
5406 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5407 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5408 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
5414 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5415 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5416 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
5421 gen_op_movl_T1_im(0);
5426 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5427 gen_op_movl_T1_im(0);
5428 gen_helper_neon_cgt_f32(CPU_T001);
5432 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5433 gen_op_movl_T1_im(0);
5434 gen_helper_neon_cge_f32(CPU_T001);
5438 case 26: /* Float VCEQ #0 */
5439 gen_op_movl_T1_im(0);
5440 gen_helper_neon_ceq_f32(CPU_T001);
5442 case 30: /* Float VABS */
5445 case 31: /* Float VNEG */
5449 NEON_GET_REG(T1, rd, pass);
5450 NEON_SET_REG(T1, rm, pass);
5453 NEON_GET_REG(T1, rd, pass);
5455 case 0: gen_helper_neon_trn_u8(); break;
5456 case 1: gen_helper_neon_trn_u16(); break;
5460 NEON_SET_REG(T1, rm, pass);
5462 case 56: /* Integer VRECPE */
5463 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
5465 case 57: /* Integer VRSQRTE */
5466 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
5468 case 58: /* Float VRECPE */
5469 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5471 case 59: /* Float VRSQRTE */
5472 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5474 case 60: /* VCVT.F32.S32 */
5477 case 61: /* VCVT.F32.U32 */
5480 case 62: /* VCVT.S32.F32 */
5483 case 63: /* VCVT.U32.F32 */
5487 /* Reserved: 21, 29, 39-56 */
5490 if (op == 30 || op == 31 || op >= 58) {
5491 tcg_gen_st_f32(cpu_F0s, cpu_env,
5492 neon_reg_offset(rd, pass));
5494 NEON_SET_REG(T0, rd, pass);
5499 } else if ((insn & (1 << 10)) == 0) {
5501 n = (insn >> 5) & 0x18;
5502 if (insn & (1 << 6)) {
5503 tmp = neon_load_reg(rd, 0);
5506 tcg_gen_movi_i32(tmp, 0);
5508 tmp2 = neon_load_reg(rm, 0);
5509 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5511 if (insn & (1 << 6)) {
5512 tmp = neon_load_reg(rd, 1);
5515 tcg_gen_movi_i32(tmp, 0);
5517 tmp3 = neon_load_reg(rm, 1);
5518 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5520 neon_store_reg(rd, 0, tmp2);
5521 neon_store_reg(rd, 1, tmp3);
5522 } else if ((insn & 0x380) == 0) {
5524 if (insn & (1 << 19)) {
5525 NEON_GET_REG(T0, rm, 1);
5527 NEON_GET_REG(T0, rm, 0);
5529 if (insn & (1 << 16)) {
5530 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
5531 } else if (insn & (1 << 17)) {
5532 if ((insn >> 18) & 1)
5533 gen_neon_dup_high16(cpu_T[0]);
5535 gen_neon_dup_low16(cpu_T[0]);
5537 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5538 NEON_SET_REG(T0, rd, pass);
5548 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5552 cpnum = (insn >> 8) & 0xf;
5553 if (arm_feature(env, ARM_FEATURE_XSCALE)
5554 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5560 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5561 return disas_iwmmxt_insn(env, s, insn);
5562 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5563 return disas_dsp_insn(env, s, insn);
5568 return disas_vfp_insn (env, s, insn);
5570 return disas_cp15_insn (env, s, insn);
5572 /* Unknown coprocessor. See if the board has hooked it. */
5573 return disas_cp_insn (env, s, insn);
5578 /* Store a 64-bit value to a register pair. Clobbers val. */
5579 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
5583 tcg_gen_trunc_i64_i32(tmp, val);
5584 store_reg(s, rlow, tmp);
5586 tcg_gen_shri_i64(val, val, 32);
5587 tcg_gen_trunc_i64_i32(tmp, val);
5588 store_reg(s, rhigh, tmp);
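/* i.e. rlow receives bits [31:0] of val and rhigh bits [63:32]. */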
5591 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5592 static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
5597 /* Load value and extend to 64 bits. */
5598 tmp = tcg_temp_new(TCG_TYPE_I64);
5599 tmp2 = load_reg(s, rlow);
5600 tcg_gen_extu_i32_i64(tmp, tmp2);
5602 tcg_gen_add_i64(val, val, tmp);
5605 /* load and add a 64-bit value from a register pair. */
5606 static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
5612 /* Load 64-bit value rd:rn. */
5613 tmpl = load_reg(s, rlow);
5614 tmph = load_reg(s, rhigh);
5615 tmp = tcg_temp_new(TCG_TYPE_I64);
5616 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5619 tcg_gen_add_i64(val, val, tmp);
5622 /* Set N and Z flags from a 64-bit value. */
5623 static void gen_logicq_cc(TCGv val)
5625 TCGv tmp = new_tmp();
5626 gen_helper_logicq_cc(tmp, val);
5631 static void disas_arm_insn(CPUState * env, DisasContext *s)
5633 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5639 insn = ldl_code(s->pc);
5642 /* M variants do not implement ARM mode. */
5647 /* Unconditional instructions. */
5648 if (((insn >> 25) & 7) == 1) {
5649 /* NEON Data processing. */
5650 if (!arm_feature(env, ARM_FEATURE_NEON))
5653 if (disas_neon_data_insn(env, s, insn))
5657 if ((insn & 0x0f100000) == 0x04000000) {
5658 /* NEON load/store. */
5659 if (!arm_feature(env, ARM_FEATURE_NEON))
5662 if (disas_neon_ls_insn(env, s, insn))
5666 if ((insn & 0x0d70f000) == 0x0550f000)
5668 else if ((insn & 0x0ffffdff) == 0x01010000) {
5671 if (insn & (1 << 9)) {
5672 /* BE8 mode not implemented. */
5676 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5677 switch ((insn >> 4) & 0xf) {
5680 gen_helper_clrex(cpu_env);
5686 /* We don't emulate caches, so these are no-ops. */
5691 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5697 op1 = (insn & 0x1f);
5698 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5699 addr = load_reg(s, 13);
5702 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
5704 i = (insn >> 23) & 3;
5706 case 0: offset = -4; break; /* DA */
5707 case 1: offset = -8; break; /* DB */
5708 case 2: offset = 0; break; /* IA */
5709 case 3: offset = 4; break; /* IB */
5713 tcg_gen_addi_i32(addr, addr, offset);
5714 tmp = load_reg(s, 14);
5715 gen_st32(tmp, addr, 0);
5717 gen_helper_cpsr_read(tmp);
5718 tcg_gen_addi_i32(addr, addr, 4);
5719 gen_st32(tmp, addr, 0);
5720 if (insn & (1 << 21)) {
5721 /* Base writeback. */
5723 case 0: offset = -8; break;
5724 case 1: offset = -4; break;
5725 case 2: offset = 4; break;
5726 case 3: offset = 0; break;
5730 tcg_gen_addi_i32(addr, tmp, offset);
5731 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5732 gen_movl_reg_T1(s, 13);
5734 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
5739 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5745 rn = (insn >> 16) & 0xf;
5746 addr = load_reg(s, rn);
5747 i = (insn >> 23) & 3;
5749 case 0: offset = -4; break; /* DA */
5750 case 1: offset = -8; break; /* DB */
5751 case 2: offset = 0; break; /* IA */
5752 case 3: offset = 4; break; /* IB */
5756 tcg_gen_addi_i32(addr, addr, offset);
5757 /* Load PC into tmp and CPSR into tmp2. */
5758 tmp = gen_ld32(addr, 0);
5759 tcg_gen_addi_i32(addr, addr, 4);
5760 tmp2 = gen_ld32(addr, 0);
5761 if (insn & (1 << 21)) {
5762 /* Base writeback. */
5764 case 0: offset = -8; break;
5765 case 1: offset = -4; break;
5766 case 2: offset = 4; break;
5767 case 3: offset = 0; break;
5771 tcg_gen_addi_i32(addr, addr, offset);
5772 store_reg(s, rn, addr);
5776 gen_rfe(s, tmp, tmp2);
5777 } else if ((insn & 0x0e000000) == 0x0a000000) {
5778 /* branch link and change to thumb (blx <offset>) */
5781 val = (uint32_t)s->pc;
5783 tcg_gen_movi_i32(tmp, val);
5784 store_reg(s, 14, tmp);
5785 /* Sign-extend the 24-bit offset */
5786 offset = (((int32_t)insn) << 8) >> 8;
5787 /* offset * 4 + bit24 * 2 + (thumb bit) */
5788 val += (offset << 2) | ((insn >> 23) & 2) | 1;
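/* BLX (immediate): the 24-bit immediate is scaled by 4, bit 24
   (the H bit) contributes a halfword, and the low bit requests
   the switch to Thumb, so the final target works out to the BLX
   address + 8 + imm24 * 4 + H * 2.  */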
5789 /* pipeline offset */
5793 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5794 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5795 /* iWMMXt register transfer. */
5796 if (env->cp15.c15_cpar & (1 << 1))
5797 if (!disas_iwmmxt_insn(env, s, insn))
5800 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5801 /* Coprocessor double register transfer. */
5802 } else if ((insn & 0x0f000010) == 0x0e000010) {
5803 /* Additional coprocessor register transfer. */
5804 } else if ((insn & 0x0ff10020) == 0x01000000) {
5807 /* cps (privileged) */
5811 if (insn & (1 << 19)) {
5812 if (insn & (1 << 8))
5814 if (insn & (1 << 7))
5816 if (insn & (1 << 6))
5818 if (insn & (1 << 18))
5821 if (insn & (1 << 17)) {
5823 val |= (insn & 0x1f);
5826 gen_op_movl_T0_im(val);
5827 gen_set_psr_T0(s, mask, 0);
5834 /* if the condition is not 'always', generate a conditional jump to the next instruction */
5836 s->condlabel = gen_new_label();
5837 gen_test_cc(cond ^ 1, s->condlabel);
5840 if ((insn & 0x0f900000) == 0x03000000) {
5841 if ((insn & (1 << 21)) == 0) {
5843 rd = (insn >> 12) & 0xf;
5844 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5845 if ((insn & (1 << 22)) == 0) {
5848 tcg_gen_movi_i32(tmp, val);
5851 tmp = load_reg(s, rd);
5852 tcg_gen_ext16u_i32(tmp, tmp);
5853 tcg_gen_ori_i32(tmp, tmp, val << 16);
5855 store_reg(s, rd, tmp);
5857 if (((insn >> 12) & 0xf) != 0xf)
5859 if (((insn >> 16) & 0xf) == 0) {
5860 gen_nop_hint(s, insn & 0xff);
5862 /* CPSR = immediate */
5864 shift = ((insn >> 8) & 0xf) * 2;
5866 val = (val >> shift) | (val << (32 - shift));
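/* Standard ARM modified immediate: an 8-bit constant rotated
   right by twice the 4-bit rotate field, e.g. a rotate field of
   4 turns 0xff into 0xff000000.  */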
5867 gen_op_movl_T0_im(val);
5868 i = ((insn & (1 << 22)) != 0);
5869 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5873 } else if ((insn & 0x0f900000) == 0x01000000
5874 && (insn & 0x00000090) != 0x00000090) {
5875 /* miscellaneous instructions */
5876 op1 = (insn >> 21) & 3;
5877 sh = (insn >> 4) & 0xf;
5880 case 0x0: /* move program status register */
5883 gen_movl_T0_reg(s, rm);
5884 i = ((op1 & 2) != 0);
5885 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5889 rd = (insn >> 12) & 0xf;
5893 tmp = load_cpu_field(spsr);
5896 gen_helper_cpsr_read(tmp);
5898 store_reg(s, rd, tmp);
5903 /* branch/exchange thumb (bx). */
5904 tmp = load_reg(s, rm);
5906 } else if (op1 == 3) {
5908 rd = (insn >> 12) & 0xf;
5909 tmp = load_reg(s, rm);
5910 gen_helper_clz(tmp, tmp);
5911 store_reg(s, rd, tmp);
5919 /* Trivial implementation equivalent to bx. */
5920 tmp = load_reg(s, rm);
5930 /* branch link/exchange thumb (blx) */
5931 tmp = load_reg(s, rm);
5933 tcg_gen_movi_i32(tmp2, s->pc);
5934 store_reg(s, 14, tmp2);
5937 case 0x5: /* saturating add/subtract */
5938 rd = (insn >> 12) & 0xf;
5939 rn = (insn >> 16) & 0xf;
5940 tmp = load_reg(s, rm);
5941 tmp2 = load_reg(s, rn);
5943 gen_helper_double_saturate(tmp2, tmp2);
5945 gen_helper_sub_saturate(tmp, tmp, tmp2);
5947 gen_helper_add_saturate(tmp, tmp, tmp2);
5949 store_reg(s, rd, tmp);
5952 gen_set_condexec(s);
5953 gen_set_pc_im(s->pc - 4);
5954 gen_exception(EXCP_BKPT);
5955 s->is_jmp = DISAS_JUMP;
5957 case 0x8: /* signed multiply */
5961 rs = (insn >> 8) & 0xf;
5962 rn = (insn >> 12) & 0xf;
5963 rd = (insn >> 16) & 0xf;
5965 /* (32 * 16) >> 16 */
5966 tmp = load_reg(s, rm);
5967 tmp2 = load_reg(s, rs);
5969 tcg_gen_sari_i32(tmp2, tmp2, 16);
5972 tmp2 = gen_muls_i64_i32(tmp, tmp2);
5973 tcg_gen_shri_i64(tmp2, tmp2, 16);
5975 tcg_gen_trunc_i64_i32(tmp, tmp2);
5976 if ((sh & 2) == 0) {
5977 tmp2 = load_reg(s, rn);
5978 gen_helper_add_setq(tmp, tmp, tmp2);
5981 store_reg(s, rd, tmp);
5984 tmp = load_reg(s, rm);
5985 tmp2 = load_reg(s, rs);
5986 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
5989 tmp2 = tcg_temp_new(TCG_TYPE_I64);
5990 tcg_gen_ext_i32_i64(tmp2, tmp);
5992 gen_addq(s, tmp2, rn, rd);
5993 gen_storeq_reg(s, rn, rd, tmp2);
5996 tmp2 = load_reg(s, rn);
5997 gen_helper_add_setq(tmp, tmp, tmp2);
6000 store_reg(s, rd, tmp);
6007 } else if (((insn & 0x0e000000) == 0 &&
6008 (insn & 0x00000090) != 0x90) ||
6009 ((insn & 0x0e000000) == (1 << 25))) {
6010 int set_cc, logic_cc, shiftop;
6012 op1 = (insn >> 21) & 0xf;
6013 set_cc = (insn >> 20) & 1;
6014 logic_cc = table_logic_cc[op1] & set_cc;
6016 /* data processing instruction */
6017 if (insn & (1 << 25)) {
6018 /* immediate operand */
6020 shift = ((insn >> 8) & 0xf) * 2;
6022 val = (val >> shift) | (val << (32 - shift));
6023 gen_op_movl_T1_im(val);
6024 if (logic_cc && shift)
6025 gen_set_CF_bit31(cpu_T[1]);
6029 gen_movl_T1_reg(s, rm);
6030 shiftop = (insn >> 5) & 3;
6031 if (!(insn & (1 << 4))) {
6032 shift = (insn >> 7) & 0x1f;
6033 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
6035 rs = (insn >> 8) & 0xf;
6036 tmp = load_reg(s, rs);
6037 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
6040 if (op1 != 0x0f && op1 != 0x0d) {
6041 rn = (insn >> 16) & 0xf;
6042 gen_movl_T0_reg(s, rn);
6044 rd = (insn >> 12) & 0xf;
6047 gen_op_andl_T0_T1();
6048 gen_movl_reg_T0(s, rd);
6050 gen_op_logic_T0_cc();
6053 gen_op_xorl_T0_T1();
6054 gen_movl_reg_T0(s, rd);
6056 gen_op_logic_T0_cc();
6059 if (set_cc && rd == 15) {
6060 /* SUBS r15, ... is used for exception return. */
6063 gen_op_subl_T0_T1_cc();
6064 gen_exception_return(s);
6067 gen_op_subl_T0_T1_cc();
6069 gen_op_subl_T0_T1();
6070 gen_movl_reg_T0(s, rd);
6075 gen_op_rsbl_T0_T1_cc();
6077 gen_op_rsbl_T0_T1();
6078 gen_movl_reg_T0(s, rd);
6082 gen_op_addl_T0_T1_cc();
6084 gen_op_addl_T0_T1();
6085 gen_movl_reg_T0(s, rd);
6089 gen_op_adcl_T0_T1_cc();
6092 gen_movl_reg_T0(s, rd);
6096 gen_op_sbcl_T0_T1_cc();
6099 gen_movl_reg_T0(s, rd);
6103 gen_op_rscl_T0_T1_cc();
6106 gen_movl_reg_T0(s, rd);
6110 gen_op_andl_T0_T1();
6111 gen_op_logic_T0_cc();
6116 gen_op_xorl_T0_T1();
6117 gen_op_logic_T0_cc();
6122 gen_op_subl_T0_T1_cc();
6127 gen_op_addl_T0_T1_cc();
6132 gen_movl_reg_T0(s, rd);
6134 gen_op_logic_T0_cc();
6137 if (logic_cc && rd == 15) {
6138 /* MOVS r15, ... is used for exception return. */
6141 gen_op_movl_T0_T1();
6142 gen_exception_return(s);
6144 gen_movl_reg_T1(s, rd);
6146 gen_op_logic_T1_cc();
6150 gen_op_bicl_T0_T1();
6151 gen_movl_reg_T0(s, rd);
6153 gen_op_logic_T0_cc();
6158 gen_movl_reg_T1(s, rd);
6160 gen_op_logic_T1_cc();
6164 /* other instructions */
6165 op1 = (insn >> 24) & 0xf;
6169 /* multiplies, extra load/stores */
6170 sh = (insn >> 5) & 3;
6173 rd = (insn >> 16) & 0xf;
6174 rn = (insn >> 12) & 0xf;
6175 rs = (insn >> 8) & 0xf;
6177 op1 = (insn >> 20) & 0xf;
6179 case 0: case 1: case 2: case 3: case 6:
6181 tmp = load_reg(s, rs);
6182 tmp2 = load_reg(s, rm);
6183 tcg_gen_mul_i32(tmp, tmp, tmp2);
6185 if (insn & (1 << 22)) {
6186 /* Subtract (mls) */
6188 tmp2 = load_reg(s, rn);
6189 tcg_gen_sub_i32(tmp, tmp2, tmp);
6191 } else if (insn & (1 << 21)) {
6193 tmp2 = load_reg(s, rn);
6194 tcg_gen_add_i32(tmp, tmp, tmp2);
6197 if (insn & (1 << 20))
6199 store_reg(s, rd, tmp);
6203 tmp = load_reg(s, rs);
6204 tmp2 = load_reg(s, rm);
6205 if (insn & (1 << 22))
6206 tmp = gen_muls_i64_i32(tmp, tmp2);
6208 tmp = gen_mulu_i64_i32(tmp, tmp2);
6209 if (insn & (1 << 21)) /* mult accumulate */
6210 gen_addq(s, tmp, rn, rd);
6211 if (!(insn & (1 << 23))) { /* double accumulate */
6213 gen_addq_lo(s, tmp, rn);
6214 gen_addq_lo(s, tmp, rd);
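/* Adding both halves of the destination pair to the 64-bit
   product corresponds to UMAAL; the sum still fits in 64 bits.  */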
6216 if (insn & (1 << 20))
6218 gen_storeq_reg(s, rn, rd, tmp);
6222 rn = (insn >> 16) & 0xf;
6223 rd = (insn >> 12) & 0xf;
6224 if (insn & (1 << 23)) {
6225 /* load/store exclusive */
6226 gen_movl_T1_reg(s, rn);
6228 if (insn & (1 << 20)) {
6229 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6230 tmp = gen_ld32(addr, IS_USER(s));
6231 store_reg(s, rd, tmp);
6233 int label = gen_new_label();
6235 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6236 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6238 tmp = load_reg(s,rm);
6239 gen_st32(tmp, cpu_T[1], IS_USER(s));
6240 gen_set_label(label);
6241 gen_movl_reg_T0(s, rd);
6244 /* SWP instruction */
6247 /* ??? This is not really atomic. However we know
6248 we never have multiple CPUs running in parallel,
6249 so it is good enough. */
6250 addr = load_reg(s, rn);
6251 tmp = load_reg(s, rm);
6252 if (insn & (1 << 22)) {
6253 tmp2 = gen_ld8u(addr, IS_USER(s));
6254 gen_st8(tmp, addr, IS_USER(s));
6256 tmp2 = gen_ld32(addr, IS_USER(s));
6257 gen_st32(tmp, addr, IS_USER(s));
6260 store_reg(s, rd, tmp2);
6266 /* Misc load/store */
6267 rn = (insn >> 16) & 0xf;
6268 rd = (insn >> 12) & 0xf;
6269 addr = load_reg(s, rn);
6270 if (insn & (1 << 24))
6271 gen_add_datah_offset(s, insn, 0, addr);
6273 if (insn & (1 << 20)) {
6277 tmp = gen_ld16u(addr, IS_USER(s));
6280 tmp = gen_ld8s(addr, IS_USER(s));
6284 tmp = gen_ld16s(addr, IS_USER(s));
6288 } else if (sh & 2) {
6292 tmp = load_reg(s, rd);
6293 gen_st32(tmp, addr, IS_USER(s));
6294 tcg_gen_addi_i32(addr, addr, 4);
6295 tmp = load_reg(s, rd + 1);
6296 gen_st32(tmp, addr, IS_USER(s));
6300 tmp = gen_ld32(addr, IS_USER(s));
6301 store_reg(s, rd, tmp);
6302 tcg_gen_addi_i32(addr, addr, 4);
6303 tmp = gen_ld32(addr, IS_USER(s));
6307 address_offset = -4;
6310 tmp = load_reg(s, rd);
6311 gen_st16(tmp, addr, IS_USER(s));
6314 /* Perform base writeback before writing the loaded value to
6315 its destination, to ensure correct behavior with overlapping index registers.
6316 ldrd with base writeback is undefined if the
6317 destination and index registers overlap. */
6318 if (!(insn & (1 << 24))) {
6319 gen_add_datah_offset(s, insn, address_offset, addr);
6320 store_reg(s, rn, addr);
6321 } else if (insn & (1 << 21)) {
6323 tcg_gen_addi_i32(addr, addr, address_offset);
6324 store_reg(s, rn, addr);
6329 /* Complete the load. */
6330 store_reg(s, rd, tmp);
6339 if (insn & (1 << 4)) {
6341 /* ARMv6 media instructions. */
6343 rn = (insn >> 16) & 0xf;
6344 rd = (insn >> 12) & 0xf;
6345 rs = (insn >> 8) & 0xf;
6346 switch ((insn >> 23) & 3) {
6347 case 0: /* Parallel add/subtract. */
6348 op1 = (insn >> 20) & 7;
6349 tmp = load_reg(s, rn);
6350 tmp2 = load_reg(s, rm);
6351 sh = (insn >> 5) & 7;
6352 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6354 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6356 store_reg(s, rd, tmp);
6359 if ((insn & 0x00700020) == 0) {
6360 /* Halfword pack. */
6361 tmp = load_reg(s, rn);
6362 tmp2 = load_reg(s, rm);
6363 shift = (insn >> 7) & 0x1f;
6364 if (insn & (1 << 6)) {
6368 tcg_gen_sari_i32(tmp2, tmp2, shift);
6369 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6370 tcg_gen_ext16u_i32(tmp2, tmp2);
6374 tcg_gen_shli_i32(tmp2, tmp2, shift);
6375 tcg_gen_ext16u_i32(tmp, tmp);
6376 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6378 tcg_gen_or_i32(tmp, tmp, tmp2);
6380 store_reg(s, rd, tmp);
6381 } else if ((insn & 0x00200020) == 0x00200000) {
6383 tmp = load_reg(s, rm);
6384 shift = (insn >> 7) & 0x1f;
6385 if (insn & (1 << 6)) {
6388 tcg_gen_sari_i32(tmp, tmp, shift);
6390 tcg_gen_shli_i32(tmp, tmp, shift);
6392 sh = (insn >> 16) & 0x1f;
6394 if (insn & (1 << 22))
6395 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
6397 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
6399 store_reg(s, rd, tmp);
6400 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6402 tmp = load_reg(s, rm);
6403 sh = (insn >> 16) & 0x1f;
6405 if (insn & (1 << 22))
6406 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
6408 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
6410 store_reg(s, rd, tmp);
6411 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6413 tmp = load_reg(s, rn);
6414 tmp2 = load_reg(s, rm);
6416 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6417 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6420 store_reg(s, rd, tmp);
6421 } else if ((insn & 0x000003e0) == 0x00000060) {
6422 tmp = load_reg(s, rm);
6423 shift = (insn >> 10) & 3;
6424 /* ??? In many cases it's not necessary to do a
6425 rotate, a shift is sufficient. */
6427 tcg_gen_rori_i32(tmp, tmp, shift * 8);
6428 op1 = (insn >> 20) & 7;
6430 case 0: gen_sxtb16(tmp); break;
6431 case 2: gen_sxtb(tmp); break;
6432 case 3: gen_sxth(tmp); break;
6433 case 4: gen_uxtb16(tmp); break;
6434 case 6: gen_uxtb(tmp); break;
6435 case 7: gen_uxth(tmp); break;
6436 default: goto illegal_op;
6439 tmp2 = load_reg(s, rn);
6440 if ((op1 & 3) == 0) {
6441 gen_add16(tmp, tmp2);
6443 tcg_gen_add_i32(tmp, tmp, tmp2);
6447 store_reg(s, rd, tmp);
6448 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6450 tmp = load_reg(s, rm);
6451 if (insn & (1 << 22)) {
6452 if (insn & (1 << 7)) {
6456 gen_helper_rbit(tmp, tmp);
6459 if (insn & (1 << 7))
6462 tcg_gen_bswap_i32(tmp, tmp);
6464 store_reg(s, rd, tmp);
6469 case 2: /* Multiplies (Type 3). */
6470 tmp = load_reg(s, rm);
6471 tmp2 = load_reg(s, rs);
6472 if (insn & (1 << 20)) {
6473 /* Signed multiply most significant [accumulate]. */
6474 tmp2 = gen_muls_i64_i32(tmp, tmp2);
6475 if (insn & (1 << 5))
6476 tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
6477 tcg_gen_shri_i64(tmp2, tmp2, 32);
6479 tcg_gen_trunc_i64_i32(tmp, tmp2);
6481 tmp2 = load_reg(s, rn);
6482 if (insn & (1 << 6)) {
6483 tcg_gen_sub_i32(tmp, tmp, tmp2);
6485 tcg_gen_add_i32(tmp, tmp, tmp2);
6489 store_reg(s, rd, tmp);
6491 if (insn & (1 << 5))
6492 gen_swap_half(tmp2);
6493 gen_smul_dual(tmp, tmp2);
6494 /* This addition cannot overflow. */
6495 if (insn & (1 << 6)) {
6496 tcg_gen_sub_i32(tmp, tmp, tmp2);
6498 tcg_gen_add_i32(tmp, tmp, tmp2);
6501 if (insn & (1 << 22)) {
6502 /* smlald, smlsld */
6503 tmp2 = tcg_temp_new(TCG_TYPE_I64);
6504 tcg_gen_ext_i32_i64(tmp2, tmp);
6506 gen_addq(s, tmp2, rd, rn);
6507 gen_storeq_reg(s, rd, rn, tmp2);
6509 /* smuad, smusd, smlad, smlsd */
6512 tmp2 = load_reg(s, rd);
6513 gen_helper_add_setq(tmp, tmp, tmp2);
6516 store_reg(s, rn, tmp);
6521 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6523 case 0: /* Unsigned sum of absolute differences. */
6525 tmp = load_reg(s, rm);
6526 tmp2 = load_reg(s, rs);
6527 gen_helper_usad8(tmp, tmp, tmp2);
6530 tmp2 = load_reg(s, rn);
6531 tcg_gen_add_i32(tmp, tmp, tmp2);
6534 store_reg(s, rd, tmp);
6536 case 0x20: case 0x24: case 0x28: case 0x2c:
6537 /* Bitfield insert/clear. */
6539 shift = (insn >> 7) & 0x1f;
6540 i = (insn >> 16) & 0x1f;
6544 tcg_gen_movi_i32(tmp, 0);
6546 tmp = load_reg(s, rm);
6549 tmp2 = load_reg(s, rd);
6550 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
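/* (1u << i) - 1 is an i-bit wide mask (e.g. i == 8 gives 0xff);
   the field is inserted into the old rd value starting at bit
   position 'shift'.  */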
6553 store_reg(s, rd, tmp);
6555 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6556 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6557 tmp = load_reg(s, rm);
6558 shift = (insn >> 7) & 0x1f;
6559 i = ((insn >> 16) & 0x1f) + 1;
6564 gen_ubfx(tmp, shift, (1u << i) - 1);
6566 gen_sbfx(tmp, shift, i);
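/* i is the field width (the widthm1 field plus one); the
   extracted field occupies bits [shift + i - 1 : shift] of rm.  */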
6569 store_reg(s, rd, tmp);
6579 /* Check for undefined extension instructions
6580 * per the ARM Bible, i.e.:
6581 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6583 sh = (0xf << 20) | (0xf << 4);
6584 if (op1 == 0x7 && ((insn & sh) == sh))
6588 /* load/store byte/word */
6589 rn = (insn >> 16) & 0xf;
6590 rd = (insn >> 12) & 0xf;
6591 tmp2 = load_reg(s, rn);
6592 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6593 if (insn & (1 << 24))
6594 gen_add_data_offset(s, insn, tmp2);
6595 if (insn & (1 << 20)) {
6598 if (insn & (1 << 22)) {
6599 tmp = gen_ld8u(tmp2, i);
6601 tmp = gen_ld32(tmp2, i);
6605 tmp = load_reg(s, rd);
6606 if (insn & (1 << 22))
6607 gen_st8(tmp, tmp2, i);
6609 gen_st32(tmp, tmp2, i);
6611 if (!(insn & (1 << 24))) {
6612 gen_add_data_offset(s, insn, tmp2);
6613 store_reg(s, rn, tmp2);
6614 } else if (insn & (1 << 21)) {
6615 store_reg(s, rn, tmp2);
6619 if (insn & (1 << 20)) {
6620 /* Complete the load. */
6624 store_reg(s, rd, tmp);
6630 int j, n, user, loaded_base;
6632 /* load/store multiple words */
6633 /* XXX: store correct base if write back */
6635 if (insn & (1 << 22)) {
6637 goto illegal_op; /* only usable in supervisor mode */
6639 if ((insn & (1 << 15)) == 0)
6642 rn = (insn >> 16) & 0xf;
6643 addr = load_reg(s, rn);
6645 /* compute total size */
6647 TCGV_UNUSED(loaded_var);
6650 if (insn & (1 << i))
6653 /* XXX: test invalid n == 0 case ? */
6654 if (insn & (1 << 23)) {
6655 if (insn & (1 << 24)) {
6657 tcg_gen_addi_i32(addr, addr, 4);
6659 /* post increment */
6662 if (insn & (1 << 24)) {
6664 tcg_gen_addi_i32(addr, addr, -(n * 4));
6666 /* post decrement */
6668 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
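/* All four addressing modes are reduced to an ascending copy:
   the start address is adjusted so the lowest-numbered register
   always goes to the lowest address, e.g. decrement-before uses
   base - n * 4 and decrement-after uses base - (n - 1) * 4.  */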
6673 if (insn & (1 << i)) {
6674 if (insn & (1 << 20)) {
6676 tmp = gen_ld32(addr, IS_USER(s));
6680 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6682 } else if (i == rn) {
6686 store_reg(s, i, tmp);
6691 /* special case: r15 = PC + 8 */
6692 val = (long)s->pc + 4;
6694 tcg_gen_movi_i32(tmp, val);
6697 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
6699 tmp = load_reg(s, i);
6701 gen_st32(tmp, addr, IS_USER(s));
6704 /* no need to add after the last transfer */
6706 tcg_gen_addi_i32(addr, addr, 4);
6709 if (insn & (1 << 21)) {
6711 if (insn & (1 << 23)) {
6712 if (insn & (1 << 24)) {
6715 /* post increment */
6716 tcg_gen_addi_i32(addr, addr, 4);
6719 if (insn & (1 << 24)) {
6722 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6724 /* post decrement */
6725 tcg_gen_addi_i32(addr, addr, -(n * 4));
6728 store_reg(s, rn, addr);
6733 store_reg(s, rn, loaded_var);
6735 if ((insn & (1 << 22)) && !user) {
6736 /* Restore CPSR from SPSR. */
6737 tmp = load_cpu_field(spsr);
6738 gen_set_cpsr(tmp, 0xffffffff);
6740 s->is_jmp = DISAS_UPDATE;
6749 /* branch (and link) */
6750 val = (int32_t)s->pc;
6751 if (insn & (1 << 24)) {
6753 tcg_gen_movi_i32(tmp, val);
6754 store_reg(s, 14, tmp);
6756 offset = (((int32_t)insn << 8) >> 8);
6757 val += (offset << 2) + 4;
6765 if (disas_coproc_insn(env, s, insn))
6770 gen_set_pc_im(s->pc);
6771 s->is_jmp = DISAS_SWI;
6775 gen_set_condexec(s);
6776 gen_set_pc_im(s->pc - 4);
6777 gen_exception(EXCP_UDEF);
6778 s->is_jmp = DISAS_JUMP;
6784 /* Return true if this is a Thumb-2 logical op. */
6786 thumb2_logic_op(int op)
6791 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6792 then set condition code flags based on the result of the operation.
6793 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6794 to the high bit of T1.
6795 Returns zero if the opcode is valid. */
6798 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6805 gen_op_andl_T0_T1();
6809 gen_op_bicl_T0_T1();
6822 gen_op_xorl_T0_T1();
6827 gen_op_addl_T0_T1_cc();
6829 gen_op_addl_T0_T1();
6833 gen_op_adcl_T0_T1_cc();
6839 gen_op_sbcl_T0_T1_cc();
6845 gen_op_subl_T0_T1_cc();
6847 gen_op_subl_T0_T1();
6851 gen_op_rsbl_T0_T1_cc();
6853 gen_op_rsbl_T0_T1();
6855 default: /* 5, 6, 7, 9, 12, 15. */
6859 gen_op_logic_T0_cc();
6861 gen_set_CF_bit31(cpu_T[1]);
6866 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6868 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6870 uint32_t insn, imm, shift, offset;
6871 uint32_t rd, rn, rm, rs;
6881 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6882 || arm_feature (env, ARM_FEATURE_M))) {
6883 /* Thumb-1 cores may need to treat bl and blx as a pair of
6884 16-bit instructions to get correct prefetch abort behavior. */
6886 if ((insn & (1 << 12)) == 0) {
6887 /* Second half of blx. */
6888 offset = ((insn & 0x7ff) << 1);
6889 tmp = load_reg(s, 14);
6890 tcg_gen_addi_i32(tmp, tmp, offset);
6891 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
6894 tcg_gen_movi_i32(tmp2, s->pc | 1);
6895 store_reg(s, 14, tmp2);
6899 if (insn & (1 << 11)) {
6900 /* Second half of bl. */
6901 offset = ((insn & 0x7ff) << 1) | 1;
6902 tmp = load_reg(s, 14);
6903 tcg_gen_addi_i32(tmp, tmp, offset);
6906 tcg_gen_movi_i32(tmp2, s->pc | 1);
6907 store_reg(s, 14, tmp2);
6911 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6912 /* Instruction spans a page boundary. Implement it as two
6913 16-bit instructions in case the second half causes an exception. */
6915 offset = ((int32_t)insn << 21) >> 9;
6916 gen_op_movl_T0_im(s->pc + 2 + offset);
6917 gen_movl_reg_T0(s, 14);
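/* r14 temporarily holds the partial target: the PC base (the bl
   address + 4) plus the sign-extended upper half of the offset.
   The second halfword is then decoded separately and adds its
   own 11-bit offset to complete the branch.  */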
6920 /* Fall through to 32-bit decode. */
6923 insn = lduw_code(s->pc);
6925 insn |= (uint32_t)insn_hw1 << 16;
6927 if ((insn & 0xf800e800) != 0xf000e800) {
6931 rn = (insn >> 16) & 0xf;
6932 rs = (insn >> 12) & 0xf;
6933 rd = (insn >> 8) & 0xf;
6935 switch ((insn >> 25) & 0xf) {
6936 case 0: case 1: case 2: case 3:
6937 /* 16-bit instructions. Should never happen. */
6940 if (insn & (1 << 22)) {
6941 /* Other load/store, table branch. */
6942 if (insn & 0x01200000) {
6943 /* Load/store doubleword. */
6946 tcg_gen_movi_i32(addr, s->pc & ~3);
6948 addr = load_reg(s, rn);
6950 offset = (insn & 0xff) * 4;
6951 if ((insn & (1 << 23)) == 0)
6953 if (insn & (1 << 24)) {
6954 tcg_gen_addi_i32(addr, addr, offset);
6957 if (insn & (1 << 20)) {
6959 tmp = gen_ld32(addr, IS_USER(s));
6960 store_reg(s, rs, tmp);
6961 tcg_gen_addi_i32(addr, addr, 4);
6962 tmp = gen_ld32(addr, IS_USER(s));
6963 store_reg(s, rd, tmp);
6966 tmp = load_reg(s, rs);
6967 gen_st32(tmp, addr, IS_USER(s));
6968 tcg_gen_addi_i32(addr, addr, 4);
6969 tmp = load_reg(s, rd);
6970 gen_st32(tmp, addr, IS_USER(s));
6972 if (insn & (1 << 21)) {
6973 /* Base writeback. */
6976 tcg_gen_addi_i32(addr, addr, offset - 4);
6977 store_reg(s, rn, addr);
6981 } else if ((insn & (1 << 23)) == 0) {
6982 /* Load/store exclusive word. */
6983 gen_movl_T1_reg(s, rn);
6985 if (insn & (1 << 20)) {
6986 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6987 tmp = gen_ld32(addr, IS_USER(s));
6988 store_reg(s, rd, tmp);
6990 int label = gen_new_label();
6991 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6992 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6994 tmp = load_reg(s, rs);
6995 gen_st32(tmp, cpu_T[1], IS_USER(s));
6996 gen_set_label(label);
6997 gen_movl_reg_T0(s, rd);
6999 } else if ((insn & (1 << 6)) == 0) {
7003 tcg_gen_movi_i32(addr, s->pc);
7005 addr = load_reg(s, rn);
7007 tmp = load_reg(s, rm);
7008 tcg_gen_add_i32(addr, addr, tmp);
7009 if (insn & (1 << 4)) {
7011 tcg_gen_add_i32(addr, addr, tmp);
7013 tmp = gen_ld16u(addr, IS_USER(s));
7016 tmp = gen_ld8u(addr, IS_USER(s));
7019 tcg_gen_shli_i32(tmp, tmp, 1);
7020 tcg_gen_addi_i32(tmp, tmp, s->pc);
7021 store_reg(s, 15, tmp);
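/* Table branch (tbb/tbh): each table entry is a forward
   half-offset, so the loaded byte or halfword is doubled and
   added to the PC of the following instruction.  */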
7023 /* Load/store exclusive byte/halfword/doubleword. */
7024 /* ??? These are not really atomic. However we know
7025 we never have multiple CPUs running in parallel,
7026 so it is good enough. */
7027 op = (insn >> 4) & 0x3;
7028 /* Must use a global reg for the address because we have
7029 a conditional branch in the store instruction. */
7030 gen_movl_T1_reg(s, rn);
7032 if (insn & (1 << 20)) {
7033 gen_helper_mark_exclusive(cpu_env, addr);
7036 tmp = gen_ld8u(addr, IS_USER(s));
7039 tmp = gen_ld16u(addr, IS_USER(s));
7042 tmp = gen_ld32(addr, IS_USER(s));
7043 tcg_gen_addi_i32(addr, addr, 4);
7044 tmp2 = gen_ld32(addr, IS_USER(s));
7045 store_reg(s, rd, tmp2);
7050 store_reg(s, rs, tmp);
7052 int label = gen_new_label();
7053 /* Must use a global that is not killed by the branch. */
7054 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7055 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
7056 tmp = load_reg(s, rs);
7059 gen_st8(tmp, addr, IS_USER(s));
7062 gen_st16(tmp, addr, IS_USER(s));
7065 gen_st32(tmp, addr, IS_USER(s));
7066 tcg_gen_addi_i32(addr, addr, 4);
7067 tmp = load_reg(s, rd);
7068 gen_st32(tmp, addr, IS_USER(s));
7073 gen_set_label(label);
7074 gen_movl_reg_T0(s, rm);
7078 /* Load/store multiple, RFE, SRS. */
7079 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7080 /* Not available in user mode. */
7083 if (insn & (1 << 20)) {
7085 addr = load_reg(s, rn);
7086 if ((insn & (1 << 24)) == 0)
7087 tcg_gen_addi_i32(addr, addr, -8);
7088 /* Load PC into tmp and CPSR into tmp2. */
7089 tmp = gen_ld32(addr, 0);
7090 tcg_gen_addi_i32(addr, addr, 4);
7091 tmp2 = gen_ld32(addr, 0);
7092 if (insn & (1 << 21)) {
7093 /* Base writeback. */
7094 if (insn & (1 << 24)) {
7095 tcg_gen_addi_i32(addr, addr, 4);
7097 tcg_gen_addi_i32(addr, addr, -4);
7099 store_reg(s, rn, addr);
7103 gen_rfe(s, tmp, tmp2);
7107 if (op == (env->uncached_cpsr & CPSR_M)) {
7108 addr = load_reg(s, 13);
7111 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
7113 if ((insn & (1 << 24)) == 0) {
7114 tcg_gen_addi_i32(addr, addr, -8);
7116 tmp = load_reg(s, 14);
7117 gen_st32(tmp, addr, 0);
7118 tcg_gen_addi_i32(addr, addr, 4);
7120 gen_helper_cpsr_read(tmp);
7121 gen_st32(tmp, addr, 0);
7122 if (insn & (1 << 21)) {
7123 if ((insn & (1 << 24)) == 0) {
7124 tcg_gen_addi_i32(addr, addr, -4);
7126 tcg_gen_addi_i32(addr, addr, 4);
7128 if (op == (env->uncached_cpsr & CPSR_M)) {
7129 store_reg(s, 13, addr);
7131 gen_helper_set_r13_banked(cpu_env,
7132 tcg_const_i32(op), addr);
7140 /* Load/store multiple. */
7141 addr = load_reg(s, rn);
7143 for (i = 0; i < 16; i++) {
7144 if (insn & (1 << i))
7147 if (insn & (1 << 24)) {
7148 tcg_gen_addi_i32(addr, addr, -offset);
7151 for (i = 0; i < 16; i++) {
7152 if ((insn & (1 << i)) == 0)
7154 if (insn & (1 << 20)) {
7156 tmp = gen_ld32(addr, IS_USER(s));
7160 store_reg(s, i, tmp);
7164 tmp = load_reg(s, i);
7165 gen_st32(tmp, addr, IS_USER(s));
7167 tcg_gen_addi_i32(addr, addr, 4);
7169 if (insn & (1 << 21)) {
7170 /* Base register writeback. */
7171 if (insn & (1 << 24)) {
7172 tcg_gen_addi_i32(addr, addr, -offset);
7174 /* Fault if writeback register is in register list. */
7175 if (insn & (1 << rn))
7177 store_reg(s, rn, addr);
7184 case 5: /* Data processing register constant shift. */
7186 gen_op_movl_T0_im(0);
7188 gen_movl_T0_reg(s, rn);
7189 gen_movl_T1_reg(s, rm);
7190 op = (insn >> 21) & 0xf;
7191 shiftop = (insn >> 4) & 3;
7192 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7193 conds = (insn & (1 << 20)) != 0;
7194 logic_cc = (conds && thumb2_logic_op(op));
7195 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
7196 if (gen_thumb2_data_op(s, op, conds, 0))
7199 gen_movl_reg_T0(s, rd);
7201 case 13: /* Misc data processing. */
7202 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7203 if (op < 4 && (insn & 0xf000) != 0xf000)
7206 case 0: /* Register controlled shift. */
7207 tmp = load_reg(s, rn);
7208 tmp2 = load_reg(s, rm);
7209 if ((insn & 0x70) != 0)
7211 op = (insn >> 21) & 3;
7212 logic_cc = (insn & (1 << 20)) != 0;
7213 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7216 store_reg(s, rd, tmp);
7218 case 1: /* Sign/zero extend. */
7219 tmp = load_reg(s, rm);
7220 shift = (insn >> 4) & 3;
7221 /* ??? In many cases it's not necessary to do a
7222 rotate, a shift is sufficient. */
7224 tcg_gen_rori_i32(tmp, tmp, shift * 8);
7225 op = (insn >> 20) & 7;
7227 case 0: gen_sxth(tmp); break;
7228 case 1: gen_uxth(tmp); break;
7229 case 2: gen_sxtb16(tmp); break;
7230 case 3: gen_uxtb16(tmp); break;
7231 case 4: gen_sxtb(tmp); break;
7232 case 5: gen_uxtb(tmp); break;
7233 default: goto illegal_op;
7236 tmp2 = load_reg(s, rn);
7237 if ((op >> 1) == 1) {
7238 gen_add16(tmp, tmp2);
7240 tcg_gen_add_i32(tmp, tmp, tmp2);
7244 store_reg(s, rd, tmp);
7246 case 2: /* SIMD add/subtract. */
7247 op = (insn >> 20) & 7;
7248 shift = (insn >> 4) & 7;
7249 if ((op & 3) == 3 || (shift & 3) == 3)
7251 tmp = load_reg(s, rn);
7252 tmp2 = load_reg(s, rm);
7253 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7255 store_reg(s, rd, tmp);
7257 case 3: /* Other data processing. */
7258 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7260 /* Saturating add/subtract. */
7261 tmp = load_reg(s, rn);
7262 tmp2 = load_reg(s, rm);
7264 gen_helper_double_saturate(tmp, tmp);
7266 gen_helper_sub_saturate(tmp, tmp2, tmp);
7268 gen_helper_add_saturate(tmp, tmp, tmp2);
7271 tmp = load_reg(s, rn);
7273 case 0x0a: /* rbit */
7274 gen_helper_rbit(tmp, tmp);
7276 case 0x08: /* rev */
7277 tcg_gen_bswap_i32(tmp, tmp);
7279 case 0x09: /* rev16 */
7282 case 0x0b: /* revsh */
7285 case 0x10: /* sel */
7286 tmp2 = load_reg(s, rm);
7288 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7289 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7293 case 0x18: /* clz */
7294 gen_helper_clz(tmp, tmp);
7300 store_reg(s, rd, tmp);
7302 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7303 op = (insn >> 4) & 0xf;
7304 tmp = load_reg(s, rn);
7305 tmp2 = load_reg(s, rm);
7306 switch ((insn >> 20) & 7) {
7307 case 0: /* 32 x 32 -> 32 */
7308 tcg_gen_mul_i32(tmp, tmp, tmp2);
7311 tmp2 = load_reg(s, rs);
7313 tcg_gen_sub_i32(tmp, tmp2, tmp);
7315 tcg_gen_add_i32(tmp, tmp, tmp2);
7319 case 1: /* 16 x 16 -> 32 */
7320 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7323 tmp2 = load_reg(s, rs);
7324 gen_helper_add_setq(tmp, tmp, tmp2);
7328 case 2: /* Dual multiply add. */
7329 case 4: /* Dual multiply subtract. */
7331 gen_swap_half(tmp2);
7332 gen_smul_dual(tmp, tmp2);
7333 /* This addition cannot overflow. */
7334 if (insn & (1 << 22)) {
7335 tcg_gen_sub_i32(tmp, tmp, tmp2);
7337 tcg_gen_add_i32(tmp, tmp, tmp2);
7342 tmp2 = load_reg(s, rs);
7343 gen_helper_add_setq(tmp, tmp, tmp2);
7347 case 3: /* 32 * 16 -> 32msb */
7349 tcg_gen_sari_i32(tmp2, tmp2, 16);
7352 tmp2 = gen_muls_i64_i32(tmp, tmp2);
7353 tcg_gen_shri_i64(tmp2, tmp2, 16);
7355 tcg_gen_trunc_i64_i32(tmp, tmp2);
7358 tmp2 = load_reg(s, rs);
7359 gen_helper_add_setq(tmp, tmp, tmp2);
7363 case 5: case 6: /* 32 * 32 -> 32msb */
7364 gen_imull(tmp, tmp2);
7365 if (insn & (1 << 5)) {
7366 gen_roundqd(tmp, tmp2);
7373 tmp2 = load_reg(s, rs);
7374 if (insn & (1 << 21)) {
7375 tcg_gen_add_i32(tmp, tmp, tmp2);
7377 tcg_gen_sub_i32(tmp, tmp2, tmp);
7382 case 7: /* Unsigned sum of absolute differences. */
7383 gen_helper_usad8(tmp, tmp, tmp2);
7386 tmp2 = load_reg(s, rs);
7387 tcg_gen_add_i32(tmp, tmp, tmp2);
7392 store_reg(s, rd, tmp);
7394 case 6: case 7: /* 64-bit multiply, Divide. */
7395 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7396 tmp = load_reg(s, rn);
7397 tmp2 = load_reg(s, rm);
7398 if ((op & 0x50) == 0x10) {
7400 if (!arm_feature(env, ARM_FEATURE_DIV))
7403 gen_helper_udiv(tmp, tmp, tmp2);
7405 gen_helper_sdiv(tmp, tmp, tmp2);
7407 store_reg(s, rd, tmp);
7408 } else if ((op & 0xe) == 0xc) {
7409 /* Dual multiply accumulate long. */
7411 gen_swap_half(tmp2);
7412 gen_smul_dual(tmp, tmp2);
7414 tcg_gen_sub_i32(tmp, tmp, tmp2);
7416 tcg_gen_add_i32(tmp, tmp, tmp2);
7419 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7420 gen_addq(s, tmp, rs, rd);
7421 gen_storeq_reg(s, rs, rd, tmp);
7424 /* Unsigned 64-bit multiply */
7425 tmp = gen_mulu_i64_i32(tmp, tmp2);
7429 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7431 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7432 tcg_gen_ext_i32_i64(tmp2, tmp);
7436 /* Signed 64-bit multiply */
7437 tmp = gen_muls_i64_i32(tmp, tmp2);
7442 gen_addq_lo(s, tmp, rs);
7443 gen_addq_lo(s, tmp, rd);
7444 } else if (op & 0x40) {
7445 /* 64-bit accumulate. */
7446 gen_addq(s, tmp, rs, rd);
7448 gen_storeq_reg(s, rs, rd, tmp);
7453 case 6: case 7: case 14: case 15:
7455 if (((insn >> 24) & 3) == 3) {
7456 /* Translate into the equivalent ARM encoding. */
7457 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7458 if (disas_neon_data_insn(env, s, insn))
7461 if (insn & (1 << 28))
7463 if (disas_coproc_insn (env, s, insn))
7467 case 8: case 9: case 10: case 11:
7468 if (insn & (1 << 15)) {
7469 /* Branches, misc control. */
7470 if (insn & 0x5000) {
7471 /* Unconditional branch. */
7472 /* signextend(hw1[10:0]) -> offset[:12]. */
7473 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7474 /* hw1[10:0] -> offset[11:1]. */
7475 offset |= (insn & 0x7ff) << 1;
7476 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7477 offset[24:22] already have the same value because of the
7478 sign extension above. */
7479 offset ^= ((~insn) & (1 << 13)) << 10;
7480 offset ^= ((~insn) & (1 << 11)) << 11;
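/* hw2 bits 13 and 11 are J1 and J2; XORing their inverted values
   into offset bits 23 and 22 (currently copies of the sign bit)
   implements I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S) from the
   encoding.  */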
7482 if (insn & (1 << 14)) {
7483 /* Branch and link. */
7484 gen_op_movl_T1_im(s->pc | 1);
7485 gen_movl_reg_T1(s, 14);
7489 if (insn & (1 << 12)) {
7494 offset &= ~(uint32_t)2;
7495 gen_bx_im(s, offset);
7497 } else if (((insn >> 23) & 7) == 7) {
7499 if (insn & (1 << 13))
7502 if (insn & (1 << 26)) {
7503 /* Secure monitor call (v6Z) */
7504 goto illegal_op; /* not implemented. */
7506 op = (insn >> 20) & 7;
7508 case 0: /* msr cpsr. */
7510 tmp = load_reg(s, rn);
7511 addr = tcg_const_i32(insn & 0xff);
7512 gen_helper_v7m_msr(cpu_env, addr, tmp);
7517 case 1: /* msr spsr. */
7520 gen_movl_T0_reg(s, rn);
7521 if (gen_set_psr_T0(s,
7522 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7526 case 2: /* cps, nop-hint. */
7527 if (((insn >> 8) & 7) == 0) {
7528 gen_nop_hint(s, insn & 0xff);
7530 /* Implemented as NOP in user mode. */
7535 if (insn & (1 << 10)) {
7536 if (insn & (1 << 7))
7538 if (insn & (1 << 6))
7540 if (insn & (1 << 5))
7542 if (insn & (1 << 9))
7543 imm = CPSR_A | CPSR_I | CPSR_F;
7545 if (insn & (1 << 8)) {
7547 imm |= (insn & 0x1f);
7550 gen_op_movl_T0_im(imm);
7551 gen_set_psr_T0(s, offset, 0);
7554 case 3: /* Special control operations. */
7555 op = (insn >> 4) & 0xf;
7558 gen_helper_clrex(cpu_env);
7563 /* These execute as NOPs. */
7571 /* Trivial implementation equivalent to bx. */
7572 tmp = load_reg(s, rn);
7575 case 5: /* Exception return. */
7576 /* Unpredictable in user mode. */
7578 case 6: /* mrs cpsr. */
7581 addr = tcg_const_i32(insn & 0xff);
7582 gen_helper_v7m_mrs(tmp, cpu_env, addr);
7584 gen_helper_cpsr_read(tmp);
7586 store_reg(s, rd, tmp);
7588 case 7: /* mrs spsr. */
7589 /* Not accessible in user mode. */
7590 if (IS_USER(s) || IS_M(env))
7592 tmp = load_cpu_field(spsr);
7593 store_reg(s, rd, tmp);
7598 /* Conditional branch. */
7599 op = (insn >> 22) & 0xf;
7600 /* Generate a conditional jump to next instruction. */
7601 s->condlabel = gen_new_label();
7602 gen_test_cc(op ^ 1, s->condlabel);
7605 /* offset[11:1] = insn[10:0] */
7606 offset = (insn & 0x7ff) << 1;
7607 /* offset[17:12] = insn[21:16]. */
7608 offset |= (insn & 0x003f0000) >> 4;
7609 /* offset[31:20] = insn[26]. */
7610 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7611 /* offset[18] = insn[13]. */
7612 offset |= (insn & (1 << 13)) << 5;
7613 /* offset[19] = insn[11]. */
7614 offset |= (insn & (1 << 11)) << 8;
7616 /* jump to the offset */
7617 gen_jmp(s, s->pc + offset);
7620 /* Data processing immediate. */
7621 if (insn & (1 << 25)) {
7622 if (insn & (1 << 24)) {
7623 if (insn & (1 << 20))
7625 /* Bitfield/Saturate. */
7626 op = (insn >> 21) & 7;
7628 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7631 tcg_gen_movi_i32(tmp, 0);
7633 tmp = load_reg(s, rn);
7636 case 2: /* Signed bitfield extract. */
7638 if (shift + imm > 32)
7641 gen_sbfx(tmp, shift, imm);
7643 case 6: /* Unsigned bitfield extract. */
7645 if (shift + imm > 32)
7648 gen_ubfx(tmp, shift, (1u << imm) - 1);
7650 case 3: /* Bitfield insert/clear. */
7653 imm = imm + 1 - shift;
7655 tmp2 = load_reg(s, rd);
7656 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7662 default: /* Saturate. */
7665 tcg_gen_sari_i32(tmp, tmp, shift);
7667 tcg_gen_shli_i32(tmp, tmp, shift);
7669 tmp2 = tcg_const_i32(imm);
7672 if ((op & 1) && shift == 0)
7673 gen_helper_usat16(tmp, tmp, tmp2);
7675 gen_helper_usat(tmp, tmp, tmp2);
7678 if ((op & 1) && shift == 0)
7679 gen_helper_ssat16(tmp, tmp, tmp2);
7681 gen_helper_ssat(tmp, tmp, tmp2);
7685 store_reg(s, rd, tmp);
7687 imm = ((insn & 0x04000000) >> 15)
7688 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7689 if (insn & (1 << 22)) {
7690 /* 16-bit immediate. */
7691 imm |= (insn >> 4) & 0xf000;
7692 if (insn & (1 << 23)) {
7694 tmp = load_reg(s, rd);
7695 tcg_gen_ext16u_i32(tmp, tmp);
7696 tcg_gen_ori_i32(tmp, tmp, imm << 16);
7700 tcg_gen_movi_i32(tmp, imm);
7703 /* Add/sub 12-bit immediate. */
7705 offset = s->pc & ~(uint32_t)3;
7706 if (insn & (1 << 23))
7711 tcg_gen_movi_i32(tmp, offset);
7713 tmp = load_reg(s, rn);
7714 if (insn & (1 << 23))
7715 tcg_gen_subi_i32(tmp, tmp, imm);
7717 tcg_gen_addi_i32(tmp, tmp, imm);
7720 store_reg(s, rd, tmp);
7723 int shifter_out = 0;
7724 /* modified 12-bit immediate. */
7725 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7726 imm = (insn & 0xff);
7729 /* Nothing to do. */
7731 case 1: /* 00XY00XY */
7734 case 2: /* XY00XY00 */
7738 case 3: /* XYXYXYXY */
7742 default: /* Rotated constant. */
7743 shift = (shift << 1) | (imm >> 7);
7745 imm = imm << (32 - shift);
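/* In this case the 5-bit rotation is always >= 8, so shifting
   the 8-bit constant left by (32 - shift) is equivalent to
   rotating it right by 'shift'; e.g. shift == 8 places the byte
   in bits 31..24.  */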
7749 gen_op_movl_T1_im(imm);
7750 rn = (insn >> 16) & 0xf;
7752 gen_op_movl_T0_im(0);
7754 gen_movl_T0_reg(s, rn);
7755 op = (insn >> 21) & 0xf;
7756 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7759 rd = (insn >> 8) & 0xf;
7761 gen_movl_reg_T0(s, rd);
7766 case 12: /* Load/store single data item. */
7771 if ((insn & 0x01100000) == 0x01000000) {
7772 if (disas_neon_ls_insn(env, s, insn))
7780 /* s->pc has already been incremented by 4. */
7781 imm = s->pc & 0xfffffffc;
7782 if (insn & (1 << 23))
7783 imm += insn & 0xfff;
7785 imm -= insn & 0xfff;
7786 tcg_gen_movi_i32(addr, imm);
7788 addr = load_reg(s, rn);
7789 if (insn & (1 << 23)) {
7790 /* Positive offset. */
7792 tcg_gen_addi_i32(addr, addr, imm);
7794 op = (insn >> 8) & 7;
7797 case 0: case 8: /* Shifted Register. */
7798 shift = (insn >> 4) & 0xf;
7801 tmp = load_reg(s, rm);
7803 tcg_gen_shli_i32(tmp, tmp, shift);
7804 tcg_gen_add_i32(addr, addr, tmp);
7807 case 4: /* Negative offset. */
7808 tcg_gen_addi_i32(addr, addr, -imm);
7810 case 6: /* User privilege. */
7811 tcg_gen_addi_i32(addr, addr, imm);
7814 case 1: /* Post-decrement. */
7817 case 3: /* Post-increment. */
7821 case 5: /* Pre-decrement. */
7824 case 7: /* Pre-increment. */
7825 tcg_gen_addi_i32(addr, addr, imm);
7833 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7834 if (insn & (1 << 20)) {
7836 if (rs == 15 && op != 2) {
7839 /* Memory hint. Implemented as NOP. */
7842 case 0: tmp = gen_ld8u(addr, user); break;
7843 case 4: tmp = gen_ld8s(addr, user); break;
7844 case 1: tmp = gen_ld16u(addr, user); break;
7845 case 5: tmp = gen_ld16s(addr, user); break;
7846 case 2: tmp = gen_ld32(addr, user); break;
7847 default: goto illegal_op;
7852 store_reg(s, rs, tmp);
7859 tmp = load_reg(s, rs);
7861 case 0: gen_st8(tmp, addr, user); break;
7862 case 1: gen_st16(tmp, addr, user); break;
7863 case 2: gen_st32(tmp, addr, user); break;
7864 default: goto illegal_op;
7868 tcg_gen_addi_i32(addr, addr, imm);
7870 store_reg(s, rn, addr);
7884 static void disas_thumb_insn(CPUState *env, DisasContext *s)
7886 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7893 if (s->condexec_mask) {
7894 cond = s->condexec_cond;
7895 s->condlabel = gen_new_label();
7896 gen_test_cc(cond ^ 1, s->condlabel);
7900 insn = lduw_code(s->pc);
7903 switch (insn >> 12) {
7906 op = (insn >> 11) & 3;
7909 rn = (insn >> 3) & 7;
7910 gen_movl_T0_reg(s, rn);
7911 if (insn & (1 << 10)) {
7913 gen_op_movl_T1_im((insn >> 6) & 7);
7916 rm = (insn >> 6) & 7;
7917 gen_movl_T1_reg(s, rm);
7919 if (insn & (1 << 9)) {
7920 if (s->condexec_mask)
7921 gen_op_subl_T0_T1();
7923 gen_op_subl_T0_T1_cc();
7925 if (s->condexec_mask)
7926 gen_op_addl_T0_T1();
7928 gen_op_addl_T0_T1_cc();
7930 gen_movl_reg_T0(s, rd);
7932 /* shift immediate */
7933 rm = (insn >> 3) & 7;
7934 shift = (insn >> 6) & 0x1f;
7935 tmp = load_reg(s, rm);
7936 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7937 if (!s->condexec_mask)
7939 store_reg(s, rd, tmp);
7943 /* arithmetic large immediate */
7944 op = (insn >> 11) & 3;
7945 rd = (insn >> 8) & 0x7;
7947 gen_op_movl_T0_im(insn & 0xff);
7949 gen_movl_T0_reg(s, rd);
7950 gen_op_movl_T1_im(insn & 0xff);
7954 if (!s->condexec_mask)
7955 gen_op_logic_T0_cc();
7958 gen_op_subl_T0_T1_cc();
7961 if (s->condexec_mask)
7962 gen_op_addl_T0_T1();
7964 gen_op_addl_T0_T1_cc();
7967 if (s->condexec_mask)
7968 gen_op_subl_T0_T1();
7970 gen_op_subl_T0_T1_cc();
7974 gen_movl_reg_T0(s, rd);
7977 if (insn & (1 << 11)) {
7978 rd = (insn >> 8) & 7;
7979 /* load pc-relative. Bit 1 of PC is ignored. */
7980 val = s->pc + 2 + ((insn & 0xff) * 4);
7981 val &= ~(uint32_t)2;
7983 tcg_gen_movi_i32(addr, val);
7984 tmp = gen_ld32(addr, IS_USER(s));
7986 store_reg(s, rd, tmp);
7989 if (insn & (1 << 10)) {
7990 /* data processing extended or blx */
7991 rd = (insn & 7) | ((insn >> 4) & 8);
7992 rm = (insn >> 3) & 0xf;
7993 op = (insn >> 8) & 3;
7996 gen_movl_T0_reg(s, rd);
7997 gen_movl_T1_reg(s, rm);
7998 gen_op_addl_T0_T1();
7999 gen_movl_reg_T0(s, rd);
8002 gen_movl_T0_reg(s, rd);
8003 gen_movl_T1_reg(s, rm);
8004 gen_op_subl_T0_T1_cc();
8006 case 2: /* mov/cpy */
8007 gen_movl_T0_reg(s, rm);
8008 gen_movl_reg_T0(s, rd);
8010 case 3:/* branch [and link] exchange thumb register */
8011 tmp = load_reg(s, rm);
8012 if (insn & (1 << 7)) {
8013 val = (uint32_t)s->pc | 1;
8015 tcg_gen_movi_i32(tmp2, val);
8016 store_reg(s, 14, tmp2);
8024 /* data processing register */
8026 rm = (insn >> 3) & 7;
8027 op = (insn >> 6) & 0xf;
8028 if (op == 2 || op == 3 || op == 4 || op == 7) {
8029 /* the shift/rotate ops want the operands backwards */
8038 if (op == 9) /* neg */
8039 gen_op_movl_T0_im(0);
8040 else if (op != 0xf) /* mvn doesn't read its first operand */
8041 gen_movl_T0_reg(s, rd);
8043 gen_movl_T1_reg(s, rm);
8046 gen_op_andl_T0_T1();
8047 if (!s->condexec_mask)
8048 gen_op_logic_T0_cc();
8051 gen_op_xorl_T0_T1();
8052 if (!s->condexec_mask)
8053 gen_op_logic_T0_cc();
8056 if (s->condexec_mask) {
8057 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
8059 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8060 gen_op_logic_T1_cc();
8064 if (s->condexec_mask) {
8065 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
8067 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8068 gen_op_logic_T1_cc();
8072 if (s->condexec_mask) {
8073 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
8075 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8076 gen_op_logic_T1_cc();
8080 if (s->condexec_mask)
8083 gen_op_adcl_T0_T1_cc();
8086 if (s->condexec_mask)
8089 gen_op_sbcl_T0_T1_cc();
8092 if (s->condexec_mask) {
8093 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
8095 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8096 gen_op_logic_T1_cc();
8100 gen_op_andl_T0_T1();
8101 gen_op_logic_T0_cc();
8105 if (s->condexec_mask)
8106 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
8108 gen_op_subl_T0_T1_cc();
8111 gen_op_subl_T0_T1_cc();
8115 gen_op_addl_T0_T1_cc();
8120 if (!s->condexec_mask)
8121 gen_op_logic_T0_cc();
8124 gen_op_mull_T0_T1();
8125 if (!s->condexec_mask)
8126 gen_op_logic_T0_cc();
8129 gen_op_bicl_T0_T1();
8130 if (!s->condexec_mask)
8131 gen_op_logic_T0_cc();
8135 if (!s->condexec_mask)
8136 gen_op_logic_T1_cc();
8143 gen_movl_reg_T1(s, rm);
8145 gen_movl_reg_T0(s, rd);
8150 /* load/store register offset. */
8152 rn = (insn >> 3) & 7;
8153 rm = (insn >> 6) & 7;
8154 op = (insn >> 9) & 7;
8155 addr = load_reg(s, rn);
8156 tmp = load_reg(s, rm);
8157 tcg_gen_add_i32(addr, addr, tmp);
8160 if (op < 3) /* store */
8161 tmp = load_reg(s, rd);
8165 gen_st32(tmp, addr, IS_USER(s));
8168 gen_st16(tmp, addr, IS_USER(s));
8171 gen_st8(tmp, addr, IS_USER(s));
8174 tmp = gen_ld8s(addr, IS_USER(s));
8177 tmp = gen_ld32(addr, IS_USER(s));
8180 tmp = gen_ld16u(addr, IS_USER(s));
8183 tmp = gen_ld8u(addr, IS_USER(s));
8186 tmp = gen_ld16s(addr, IS_USER(s));
8189 if (op >= 3) /* load */
8190 store_reg(s, rd, tmp);
8195 /* load/store word immediate offset */
8197 rn = (insn >> 3) & 7;
8198 addr = load_reg(s, rn);
8199 val = (insn >> 4) & 0x7c;
8200 tcg_gen_addi_i32(addr, addr, val);
8202 if (insn & (1 << 11)) {
8204 tmp = gen_ld32(addr, IS_USER(s));
8205 store_reg(s, rd, tmp);
8208 tmp = load_reg(s, rd);
8209 gen_st32(tmp, addr, IS_USER(s));
8215 /* load/store byte immediate offset */
8217 rn = (insn >> 3) & 7;
8218 addr = load_reg(s, rn);
8219 val = (insn >> 6) & 0x1f;
8220 tcg_gen_addi_i32(addr, addr, val);
8222 if (insn & (1 << 11)) {
8224 tmp = gen_ld8u(addr, IS_USER(s));
8225 store_reg(s, rd, tmp);
8228 tmp = load_reg(s, rd);
8229 gen_st8(tmp, addr, IS_USER(s));
8235 /* load/store halfword immediate offset */
8237 rn = (insn >> 3) & 7;
8238 addr = load_reg(s, rn);
8239 val = (insn >> 5) & 0x3e;
8240 tcg_gen_addi_i32(addr, addr, val);
8242 if (insn & (1 << 11)) {
8244 tmp = gen_ld16u(addr, IS_USER(s));
8245 store_reg(s, rd, tmp);
8248 tmp = load_reg(s, rd);
8249 gen_st16(tmp, addr, IS_USER(s));
8255 /* load/store from stack */
8256 rd = (insn >> 8) & 7;
8257 addr = load_reg(s, 13);
8258 val = (insn & 0xff) * 4;
8259 tcg_gen_addi_i32(addr, addr, val);
8261 if (insn & (1 << 11)) {
8263 tmp = gen_ld32(addr, IS_USER(s));
8264 store_reg(s, rd, tmp);
8267 tmp = load_reg(s, rd);
8268 gen_st32(tmp, addr, IS_USER(s));
8274 /* add to high reg */
8275 rd = (insn >> 8) & 7;
8276 if (insn & (1 << 11)) {
8278 tmp = load_reg(s, 13);
8280 /* PC. bit 1 is ignored. */
8282 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8284 val = (insn & 0xff) * 4;
8285 tcg_gen_addi_i32(tmp, tmp, val);
8286 store_reg(s, rd, tmp);
8291 op = (insn >> 8) & 0xf;
8294 /* adjust stack pointer */
8295 tmp = load_reg(s, 13);
8296 val = (insn & 0x7f) * 4;
8297 if (insn & (1 << 7))
8298 val = -(int32_t)val;
8299 tcg_gen_addi_i32(tmp, tmp, val);
8300 store_reg(s, 13, tmp);
8303 case 2: /* sign/zero extend. */
8306 rm = (insn >> 3) & 7;
8307 tmp = load_reg(s, rm);
8308 switch ((insn >> 6) & 3) {
8309 case 0: gen_sxth(tmp); break;
8310 case 1: gen_sxtb(tmp); break;
8311 case 2: gen_uxth(tmp); break;
8312 case 3: gen_uxtb(tmp); break;
8314 store_reg(s, rd, tmp);
8316 case 4: case 5: case 0xc: case 0xd:
8318 addr = load_reg(s, 13);
8319 if (insn & (1 << 8))
8323 for (i = 0; i < 8; i++) {
8324 if (insn & (1 << i))
8327 if ((insn & (1 << 11)) == 0) {
8328 tcg_gen_addi_i32(addr, addr, -offset);
8330 for (i = 0; i < 8; i++) {
8331 if (insn & (1 << i)) {
8332 if (insn & (1 << 11)) {
8334 tmp = gen_ld32(addr, IS_USER(s));
8335 store_reg(s, i, tmp);
8338 tmp = load_reg(s, i);
8339 gen_st32(tmp, addr, IS_USER(s));
8341 /* advance to the next address. */
8342 tcg_gen_addi_i32(addr, addr, 4);
8346 if (insn & (1 << 8)) {
8347 if (insn & (1 << 11)) {
8349 tmp = gen_ld32(addr, IS_USER(s));
8350 /* don't set the pc until the rest of the instruction
8354 tmp = load_reg(s, 14);
8355 gen_st32(tmp, addr, IS_USER(s));
8357 tcg_gen_addi_i32(addr, addr, 4);
8359 if ((insn & (1 << 11)) == 0) {
8360 tcg_gen_addi_i32(addr, addr, -offset);
8362 /* write back the new stack pointer */
8363 store_reg(s, 13, addr);
8364 /* set the new PC value */
8365 if ((insn & 0x0900) == 0x0900)
8369 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8371 tmp = load_reg(s, rm);
8372 s->condlabel = gen_new_label();
8374 if (insn & (1 << 11))
8375 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8377 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8379 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
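/* cbz/cbnz offset = (i:imm5) << 1, a forward displacement of
   0..126 bytes added to the instruction address + 4.  */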
8380 val = (uint32_t)s->pc + 2;
8385 case 15: /* IT, nop-hint. */
8386 if ((insn & 0xf) == 0) {
8387 gen_nop_hint(s, (insn >> 4) & 0xf);
8391 s->condexec_cond = (insn >> 4) & 0xe;
8392 s->condexec_mask = insn & 0x1f;
8393 /* No actual code generated for this insn, just setup state. */
8396 case 0xe: /* bkpt */
8397 gen_set_condexec(s);
8398 gen_set_pc_im(s->pc - 2);
8399 gen_exception(EXCP_BKPT);
8400 s->is_jmp = DISAS_JUMP;
8405 rn = (insn >> 3) & 0x7;
8407 tmp = load_reg(s, rn);
8408 switch ((insn >> 6) & 3) {
8409 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8410 case 1: gen_rev16(tmp); break;
8411 case 3: gen_revsh(tmp); break;
8412 default: goto illegal_op;
8414 store_reg(s, rd, tmp);
8422 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8425 addr = tcg_const_i32(16);
8426 gen_helper_v7m_msr(cpu_env, addr, tmp);
8430 addr = tcg_const_i32(17);
8431 gen_helper_v7m_msr(cpu_env, addr, tmp);
8435 if (insn & (1 << 4))
8436 shift = CPSR_A | CPSR_I | CPSR_F;
8440 val = ((insn & 7) << 6) & shift;
8441 gen_op_movl_T0_im(val);
8442 gen_set_psr_T0(s, shift, 0);
8452 /* load/store multiple */
8453 rn = (insn >> 8) & 0x7;
8454 addr = load_reg(s, rn);
8455 for (i = 0; i < 8; i++) {
8456 if (insn & (1 << i)) {
8457 if (insn & (1 << 11)) {
8459 tmp = gen_ld32(addr, IS_USER(s));
8460 store_reg(s, i, tmp);
8463 tmp = load_reg(s, i);
8464 gen_st32(tmp, addr, IS_USER(s));
8466 /* advance to the next address */
8467 tcg_gen_addi_i32(addr, addr, 4);
8470 /* Base register writeback. */
8471 if ((insn & (1 << rn)) == 0) {
8472 store_reg(s, rn, addr);
8479 /* conditional branch or swi */
8480 cond = (insn >> 8) & 0xf;
8486 gen_set_condexec(s);
8487 gen_set_pc_im(s->pc);
8488 s->is_jmp = DISAS_SWI;
8491 /* generate a conditional jump to next instruction */
8492 s->condlabel = gen_new_label();
8493 gen_test_cc(cond ^ 1, s->condlabel);
8495 gen_movl_T1_reg(s, 15);
8497 /* jump to the offset */
8498 val = (uint32_t)s->pc + 2;
8499 offset = ((int32_t)insn << 24) >> 24;
8505 if (insn & (1 << 11)) {
8506 if (disas_thumb2_insn(env, s, insn))
8510 /* unconditional branch */
8511 val = (uint32_t)s->pc;
8512 offset = ((int32_t)insn << 21) >> 21;
8513 val += (offset << 1) + 2;
8518 if (disas_thumb2_insn(env, s, insn))
8524 gen_set_condexec(s);
8525 gen_set_pc_im(s->pc - 4);
8526 gen_exception(EXCP_UDEF);
8527 s->is_jmp = DISAS_JUMP;
8531 gen_set_condexec(s);
8532 gen_set_pc_im(s->pc - 2);
8533 gen_exception(EXCP_UDEF);
8534 s->is_jmp = DISAS_JUMP;
8537 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8538 basic block 'tb'. If search_pc is TRUE, also generate PC
8539 information for each intermediate instruction. */
8540 static inline void gen_intermediate_code_internal(CPUState *env,
8541 TranslationBlock *tb,
8544 DisasContext dc1, *dc = &dc1;
8545 uint16_t *gen_opc_end;
8547 target_ulong pc_start;
8548 uint32_t next_page_start;
8552 /* generate intermediate code */
8554 memset(temps, 0, sizeof(temps));
8560 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8562 dc->is_jmp = DISAS_NEXT;
8564 dc->singlestep_enabled = env->singlestep_enabled;
8566 dc->thumb = env->thumb;
8567 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8568 dc->condexec_cond = env->condexec_bits >> 4;
8570 #if !defined(CONFIG_USER_ONLY)
8572 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8574 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8577 cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
8578 cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
8579 cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
8580 cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
8583 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8584 cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
8585 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8588 max_insns = tb->cflags & CF_COUNT_MASK;
8590 max_insns = CF_COUNT_MASK;
8593 /* Reset the conditional execution bits immediately. This avoids
8594 complications trying to do it at the end of the block. */
8595 if (env->condexec_bits)
8597 TCGv tmp = new_tmp();
8598 tcg_gen_movi_i32(tmp, 0);
8599 store_cpu_field(tmp, condexec_bits);
8602 #ifdef CONFIG_USER_ONLY
8603 /* Intercept jump to the magic kernel page. */
8604 if (dc->pc >= 0xffff0000) {
8605 /* We always get here via a jump, so we know we are not in a
8606 conditional execution block. */
8607 gen_exception(EXCP_KERNEL_TRAP);
8608 dc->is_jmp = DISAS_UPDATE;
8612 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8613 /* We always get here via a jump, so we know we are not in a
8614 conditional execution block. */
8615 gen_exception(EXCP_EXCEPTION_EXIT);
8616 dc->is_jmp = DISAS_UPDATE;
8621 if (env->nb_breakpoints > 0) {
8622 for(j = 0; j < env->nb_breakpoints; j++) {
8623 if (env->breakpoints[j] == dc->pc) {
8624 gen_set_condexec(dc);
8625 gen_set_pc_im(dc->pc);
8626 gen_exception(EXCP_DEBUG);
8627 dc->is_jmp = DISAS_JUMP;
8628 /* Advance PC so that clearing the breakpoint will
8629 invalidate this TB. */
8631 goto done_generating;
8637 j = gen_opc_ptr - gen_opc_buf;
8641 gen_opc_instr_start[lj++] = 0;
8643 gen_opc_pc[lj] = dc->pc;
8644 gen_opc_instr_start[lj] = 1;
8645 gen_opc_icount[lj] = num_insns;
8648 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8652 disas_thumb_insn(env, dc);
8653 if (dc->condexec_mask) {
8654 dc->condexec_cond = (dc->condexec_cond & 0xe)
8655 | ((dc->condexec_mask >> 4) & 1);
8656 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8657 if (dc->condexec_mask == 0) {
8658 dc->condexec_cond = 0;
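/* Advance the IT state after each conditionally executed
   instruction: the mask shifts left one bit and its old top bit
   becomes the low bit of the condition, mirroring the
   architectural ITSTATE advance; when the mask becomes zero the
   IT block has ended.  */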
8662 disas_arm_insn(env, dc);
8665 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8669 if (dc->condjmp && !dc->is_jmp) {
8670 gen_set_label(dc->condlabel);
8673 /* Terminate the TB on memory ops if watchpoints are present. */
8674 /* FIXME: This should be replaced by the deterministic execution
8675 * IRQ raising bits. */
8676 if (dc->is_mem && env->nb_watchpoints)
8679 /* Translation stops when a conditional branch is encountered.
8680 * Otherwise the subsequent code could get translated several times.
8681 * Also stop translation when a page boundary is reached. This
8682 * ensures prefetch aborts occur at the right place. */
8684 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8685 !env->singlestep_enabled &&
8686 dc->pc < next_page_start &&
8687 num_insns < max_insns);
8689 if (tb->cflags & CF_LAST_IO) {
8691 /* FIXME: This can theoretically happen with self-modifying code. */
8693 cpu_abort(env, "IO on conditional branch instruction");
8698 /* At this stage dc->condjmp will only be set when the skipped
8699 instruction was a conditional branch or trap, and the PC has
8700 already been written. */
8701 if (unlikely(env->singlestep_enabled)) {
8702 /* Make sure the pc is updated, and raise a debug exception. */
8704 gen_set_condexec(dc);
8705 if (dc->is_jmp == DISAS_SWI) {
8706 gen_exception(EXCP_SWI);
8708 gen_exception(EXCP_DEBUG);
8710 gen_set_label(dc->condlabel);
8712 if (dc->condjmp || !dc->is_jmp) {
8713 gen_set_pc_im(dc->pc);
8716 gen_set_condexec(dc);
8717 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
8718 gen_exception(EXCP_SWI);
8720 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
8722 gen_exception(EXCP_DEBUG);
8725 /* While branches must always occur at the end of an IT block,
8726 there are a few other things that can cause us to terminate
8727 the TB in the middle of an IT block:
8728 - Exception generating instructions (bkpt, swi, undefined).
8730 - Hardware watchpoints.
8731 Hardware breakpoints have already been handled and skip this code.
8733 gen_set_condexec(dc);
8734 switch(dc->is_jmp) {
8736 gen_goto_tb(dc, 1, dc->pc);
8741 /* indicate that the hash table must be used to find the next TB */
8745 /* nothing more to generate */
8751 gen_exception(EXCP_SWI);
8755 gen_set_label(dc->condlabel);
8756 gen_set_condexec(dc);
8757 gen_goto_tb(dc, 1, dc->pc);
8763 gen_icount_end(tb, num_insns);
8764 *gen_opc_ptr = INDEX_op_end;
8767 if (loglevel & CPU_LOG_TB_IN_ASM) {
8768 fprintf(logfile, "----------------\n");
8769 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
8770 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
8771 fprintf(logfile, "\n");
8775 j = gen_opc_ptr - gen_opc_buf;
8778 gen_opc_instr_start[lj++] = 0;
8780 tb->size = dc->pc - pc_start;
8781 tb->icount = num_insns;
8785 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8787 gen_intermediate_code_internal(env, tb, 0);
8790 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8792 gen_intermediate_code_internal(env, tb, 1);
8795 static const char *cpu_mode_names[16] = {
8796 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8797 "???", "???", "???", "und", "???", "???", "???", "sys"
8800 void cpu_dump_state(CPUState *env, FILE *f,
8801 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8811 /* ??? This assumes float64 and double have the same layout.
8812 Oh well, it's only debug dumps. */
8821 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
8823 cpu_fprintf(f, "\n");
8825 cpu_fprintf(f, " ");
8827 psr = cpsr_read(env);
8828 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8830 psr & (1 << 31) ? 'N' : '-',
8831 psr & (1 << 30) ? 'Z' : '-',
8832 psr & (1 << 29) ? 'C' : '-',
8833 psr & (1 << 28) ? 'V' : '-',
8834 psr & CPSR_T ? 'T' : 'A',
8835 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
8838 for (i = 0; i < 16; i++) {
8839 d.d = env->vfp.regs[i];
8843 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
8844 i * 2, (int)s0.i, s0.s,
8845 i * 2 + 1, (int)s1.i, s1.s,
8846 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
8849 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
8853 void gen_pc_load(CPUState *env, TranslationBlock *tb,
8854 unsigned long searched_pc, int pc_pos, void *puc)
8856 env->regs[15] = gen_opc_pc[pc_pos];