4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
28 #include "internals.h"
29 #include "disas/disas.h"
32 #include "qemu/bitops.h"
35 #include "exec/helper-proto.h"
36 #include "exec/helper-gen.h"
38 #include "trace-tcg.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J 0
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54 #include "translate.h"
55 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
57 #if defined(CONFIG_USER_ONLY)
60 #define IS_USER(s) (s->user)
64 /* We reuse the same 64-bit temporaries for efficiency. */
65 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
66 static TCGv_i32 cpu_R[16];
67 static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
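/* Exclusive-monitor state backing the emulation of the load/store-exclusive
 * (LDREX/STREX family) instructions.
 */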
68 static TCGv_i64 cpu_exclusive_addr;
69 static TCGv_i64 cpu_exclusive_val;
70 #ifdef CONFIG_USER_ONLY
71 static TCGv_i64 cpu_exclusive_test;
72 static TCGv_i32 cpu_exclusive_info;
75 /* FIXME: These should be removed. */
76 static TCGv_i32 cpu_F0s, cpu_F1s;
77 static TCGv_i64 cpu_F0d, cpu_F1d;
79 #include "exec/gen-icount.h"
81 static const char *regnames[] =
82 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
83 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
85 /* initialize TCG globals. */
86 void arm_translate_init(void)
90 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
92 for (i = 0; i < 16; i++) {
93 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
94 offsetof(CPUARMState, regs[i]),
97 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
98 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
99 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
100 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
102 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
103 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
104 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
105 offsetof(CPUARMState, exclusive_val), "exclusive_val");
106 #ifdef CONFIG_USER_ONLY
107 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
108 offsetof(CPUARMState, exclusive_test), "exclusive_test");
109 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
110 offsetof(CPUARMState, exclusive_info), "exclusive_info");
113 a64_translate_init();
116 static inline TCGv_i32 load_cpu_offset(int offset)
118 TCGv_i32 tmp = tcg_temp_new_i32();
119 tcg_gen_ld_i32(tmp, cpu_env, offset);
123 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
125 static inline void store_cpu_offset(TCGv_i32 var, int offset)
127 tcg_gen_st_i32(var, cpu_env, offset);
128 tcg_temp_free_i32(var);
131 #define store_cpu_field(var, name) \
132 store_cpu_offset(var, offsetof(CPUARMState, name))
134 /* Set a variable to the value of a CPU register. */
135 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
139 /* The architectural PC reads as the current insn's address + 8 (ARM) or + 4 (Thumb); since s->pc has already been advanced past this insn we need only add one more insn's worth. */
141 addr = (long)s->pc + 2;
143 addr = (long)s->pc + 4;
144 tcg_gen_movi_i32(var, addr);
146 tcg_gen_mov_i32(var, cpu_R[reg]);
150 /* Create a new temporary and set it to the value of a CPU register. */
151 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
153 TCGv_i32 tmp = tcg_temp_new_i32();
154 load_reg_var(s, tmp, reg);
158 /* Set a CPU register. The source must be a temporary and will be
159 * marked as dead. */
160 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
163 tcg_gen_andi_i32(var, var, ~1);
164 s->is_jmp = DISAS_JUMP;
166 tcg_gen_mov_i32(cpu_R[reg], var);
167 tcg_temp_free_i32(var);
170 /* Value extensions. */
171 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
172 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
173 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
174 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
176 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
177 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
180 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
182 TCGv_i32 tmp_mask = tcg_const_i32(mask);
183 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
184 tcg_temp_free_i32(tmp_mask);
186 /* Set NZCV flags from the high 4 bits of var. */
187 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
189 static void gen_exception_internal(int excp)
191 TCGv_i32 tcg_excp = tcg_const_i32(excp);
193 assert(excp_is_internal(excp));
194 gen_helper_exception_internal(cpu_env, tcg_excp);
195 tcg_temp_free_i32(tcg_excp);
198 static void gen_exception(int excp, uint32_t syndrome)
200 TCGv_i32 tcg_excp = tcg_const_i32(excp);
201 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
203 gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
204 tcg_temp_free_i32(tcg_syn);
205 tcg_temp_free_i32(tcg_excp);
208 static void gen_ss_advance(DisasContext *s)
210 /* If the singlestep state is Active-not-pending, advance to
211 * Active-pending.
212 */
215 gen_helper_clear_pstate_ss(cpu_env);
219 static void gen_step_complete_exception(DisasContext *s)
221 /* We just completed a step of an insn. Move from Active-not-pending
222 * to Active-pending, and then also take the swstep exception.
223 * This corresponds to making the (IMPDEF) choice to prioritize
224 * swstep exceptions over asynchronous exceptions taken to an exception
225 * level where debug is disabled. This choice has the advantage that
226 * we do not need to maintain internal state corresponding to the
227 * ISV/EX syndrome bits between completion of the step and generation
228 * of the exception, and our syndrome information is always correct.
231 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
232 s->is_jmp = DISAS_EXC;
235 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
237 TCGv_i32 tmp1 = tcg_temp_new_i32();
238 TCGv_i32 tmp2 = tcg_temp_new_i32();
239 tcg_gen_ext16s_i32(tmp1, a);
240 tcg_gen_ext16s_i32(tmp2, b);
241 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
242 tcg_temp_free_i32(tmp2);
243 tcg_gen_sari_i32(a, a, 16);
244 tcg_gen_sari_i32(b, b, 16);
245 tcg_gen_mul_i32(b, b, a);
246 tcg_gen_mov_i32(a, tmp1);
247 tcg_temp_free_i32(tmp1);
250 /* Byteswap each halfword. */
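/* For example, 0xaabbccdd becomes 0xbbaaddcc: each 16-bit half has its two
 * bytes swapped, but the halfwords themselves stay in place.
 */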
251 static void gen_rev16(TCGv_i32 var)
253 TCGv_i32 tmp = tcg_temp_new_i32();
254 tcg_gen_shri_i32(tmp, var, 8);
255 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
256 tcg_gen_shli_i32(var, var, 8);
257 tcg_gen_andi_i32(var, var, 0xff00ff00);
258 tcg_gen_or_i32(var, var, tmp);
259 tcg_temp_free_i32(tmp);
262 /* Byteswap low halfword and sign extend. */
263 static void gen_revsh(TCGv_i32 var)
265 tcg_gen_ext16u_i32(var, var);
266 tcg_gen_bswap16_i32(var, var);
267 tcg_gen_ext16s_i32(var, var);
270 /* Unsigned bitfield extract. */
271 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
274 tcg_gen_shri_i32(var, var, shift);
275 tcg_gen_andi_i32(var, var, mask);
278 /* Signed bitfield extract. */
279 static void gen_sbfx(TCGv_i32 var, int shift, int width)
284 tcg_gen_sari_i32(var, var, shift);
285 if (shift + width < 32) {
286 signbit = 1u << (width - 1);
287 tcg_gen_andi_i32(var, var, (1u << width) - 1);
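/* Sign-extend the extracted field: XORing with the sign bit and then
 * subtracting it propagates the sign into the upper bits,
 * i.e. (x ^ signbit) - signbit.
 */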
288 tcg_gen_xori_i32(var, var, signbit);
289 tcg_gen_subi_i32(var, var, signbit);
293 /* Return (b << 32) + a. Mark inputs as dead. */
294 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
296 TCGv_i64 tmp64 = tcg_temp_new_i64();
298 tcg_gen_extu_i32_i64(tmp64, b);
299 tcg_temp_free_i32(b);
300 tcg_gen_shli_i64(tmp64, tmp64, 32);
301 tcg_gen_add_i64(a, tmp64, a);
303 tcg_temp_free_i64(tmp64);
307 /* Return (b << 32) - a. Mark inputs as dead. */
308 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
310 TCGv_i64 tmp64 = tcg_temp_new_i64();
312 tcg_gen_extu_i32_i64(tmp64, b);
313 tcg_temp_free_i32(b);
314 tcg_gen_shli_i64(tmp64, tmp64, 32);
315 tcg_gen_sub_i64(a, tmp64, a);
317 tcg_temp_free_i64(tmp64);
321 /* 32x32->64 multiply. Marks inputs as dead. */
322 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
324 TCGv_i32 lo = tcg_temp_new_i32();
325 TCGv_i32 hi = tcg_temp_new_i32();
328 tcg_gen_mulu2_i32(lo, hi, a, b);
329 tcg_temp_free_i32(a);
330 tcg_temp_free_i32(b);
332 ret = tcg_temp_new_i64();
333 tcg_gen_concat_i32_i64(ret, lo, hi);
334 tcg_temp_free_i32(lo);
335 tcg_temp_free_i32(hi);
340 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
342 TCGv_i32 lo = tcg_temp_new_i32();
343 TCGv_i32 hi = tcg_temp_new_i32();
346 tcg_gen_muls2_i32(lo, hi, a, b);
347 tcg_temp_free_i32(a);
348 tcg_temp_free_i32(b);
350 ret = tcg_temp_new_i64();
351 tcg_gen_concat_i32_i64(ret, lo, hi);
352 tcg_temp_free_i32(lo);
353 tcg_temp_free_i32(hi);
358 /* Swap low and high halfwords. */
359 static void gen_swap_half(TCGv_i32 var)
361 TCGv_i32 tmp = tcg_temp_new_i32();
362 tcg_gen_shri_i32(tmp, var, 16);
363 tcg_gen_shli_i32(var, var, 16);
364 tcg_gen_or_i32(var, var, tmp);
365 tcg_temp_free_i32(tmp);
368 /* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
369 tmp = (t0 ^ t1) & 0x8000;
370 t0 &= ~0x8000;
371 t1 &= ~0x8000;
372 t0 = (t0 + t1) ^ tmp;
373 */
375 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
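/* Example: t0 = 0x00018000, t1 = 0x00028000.  A plain 32-bit add would give
 * 0x00040000, letting the carry out of the low halfword corrupt the high
 * halfword; the masking below yields the correct per-halfword sum 0x00030000.
 */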
377 TCGv_i32 tmp = tcg_temp_new_i32();
378 tcg_gen_xor_i32(tmp, t0, t1);
379 tcg_gen_andi_i32(tmp, tmp, 0x8000);
380 tcg_gen_andi_i32(t0, t0, ~0x8000);
381 tcg_gen_andi_i32(t1, t1, ~0x8000);
382 tcg_gen_add_i32(t0, t0, t1);
383 tcg_gen_xor_i32(t0, t0, tmp);
384 tcg_temp_free_i32(tmp);
385 tcg_temp_free_i32(t1);
388 /* Set CF to the top bit of var. */
389 static void gen_set_CF_bit31(TCGv_i32 var)
391 tcg_gen_shri_i32(cpu_CF, var, 31);
394 /* Set N and Z flags from var. */
395 static inline void gen_logic_CC(TCGv_i32 var)
397 tcg_gen_mov_i32(cpu_NF, var);
398 tcg_gen_mov_i32(cpu_ZF, var);
402 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
404 tcg_gen_add_i32(t0, t0, t1);
405 tcg_gen_add_i32(t0, t0, cpu_CF);
408 /* dest = T0 + T1 + CF. */
409 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
411 tcg_gen_add_i32(dest, t0, t1);
412 tcg_gen_add_i32(dest, dest, cpu_CF);
415 /* dest = T0 - T1 + CF - 1. */
416 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
418 tcg_gen_sub_i32(dest, t0, t1);
419 tcg_gen_add_i32(dest, dest, cpu_CF);
420 tcg_gen_subi_i32(dest, dest, 1);
423 /* dest = T0 + T1. Compute C, N, V and Z flags */
424 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
426 TCGv_i32 tmp = tcg_temp_new_i32();
427 tcg_gen_movi_i32(tmp, 0);
428 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
429 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
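/* Signed overflow for addition: V = (result ^ t0) & ~(t0 ^ t1), i.e. the
 * operands had the same sign and the result's sign (bit 31) differs.
 */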
430 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
431 tcg_gen_xor_i32(tmp, t0, t1);
432 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
433 tcg_temp_free_i32(tmp);
434 tcg_gen_mov_i32(dest, cpu_NF);
437 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
438 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
440 TCGv_i32 tmp = tcg_temp_new_i32();
441 if (TCG_TARGET_HAS_add2_i32) {
442 tcg_gen_movi_i32(tmp, 0);
443 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
444 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
446 TCGv_i64 q0 = tcg_temp_new_i64();
447 TCGv_i64 q1 = tcg_temp_new_i64();
448 tcg_gen_extu_i32_i64(q0, t0);
449 tcg_gen_extu_i32_i64(q1, t1);
450 tcg_gen_add_i64(q0, q0, q1);
451 tcg_gen_extu_i32_i64(q1, cpu_CF);
452 tcg_gen_add_i64(q0, q0, q1);
453 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
454 tcg_temp_free_i64(q0);
455 tcg_temp_free_i64(q1);
457 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
458 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
459 tcg_gen_xor_i32(tmp, t0, t1);
460 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
461 tcg_temp_free_i32(tmp);
462 tcg_gen_mov_i32(dest, cpu_NF);
465 /* dest = T0 - T1. Compute C, N, V and Z flags */
466 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
469 tcg_gen_sub_i32(cpu_NF, t0, t1);
470 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
471 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
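/* C is the inverted borrow: set when t0 >= t1 as unsigned values.  Signed
 * overflow for subtraction: V = (result ^ t0) & (t0 ^ t1), i.e. the operands
 * had different signs and the result's sign differs from t0's.
 */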
472 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
473 tmp = tcg_temp_new_i32();
474 tcg_gen_xor_i32(tmp, t0, t1);
475 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
476 tcg_temp_free_i32(tmp);
477 tcg_gen_mov_i32(dest, cpu_NF);
480 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
481 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
483 TCGv_i32 tmp = tcg_temp_new_i32();
484 tcg_gen_not_i32(tmp, t1);
485 gen_adc_CC(dest, t0, tmp);
486 tcg_temp_free_i32(tmp);
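/* Register-specified shifts (LSL/LSR via GEN_SHIFT below, ASR via gen_sar):
 * only the bottom byte of the shift register is used.  LSL/LSR by 32 or more
 * produce 0, and ASR by 32 or more behaves like ASR by 31; the movcond plus
 * masking/clamping also keeps the count in the 0..31 range that the 32-bit
 * TCG shift ops require.
 */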
489 #define GEN_SHIFT(name) \
490 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
492 TCGv_i32 tmp1, tmp2, tmp3; \
493 tmp1 = tcg_temp_new_i32(); \
494 tcg_gen_andi_i32(tmp1, t1, 0xff); \
495 tmp2 = tcg_const_i32(0); \
496 tmp3 = tcg_const_i32(0x1f); \
497 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
498 tcg_temp_free_i32(tmp3); \
499 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
500 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
501 tcg_temp_free_i32(tmp2); \
502 tcg_temp_free_i32(tmp1); \
508 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
511 tmp1 = tcg_temp_new_i32();
512 tcg_gen_andi_i32(tmp1, t1, 0xff);
513 tmp2 = tcg_const_i32(0x1f);
514 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
515 tcg_temp_free_i32(tmp2);
516 tcg_gen_sar_i32(dest, t0, tmp1);
517 tcg_temp_free_i32(tmp1);
520 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
522 TCGv_i32 c0 = tcg_const_i32(0);
523 TCGv_i32 tmp = tcg_temp_new_i32();
524 tcg_gen_neg_i32(tmp, src);
525 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
526 tcg_temp_free_i32(c0);
527 tcg_temp_free_i32(tmp);
530 static void shifter_out_im(TCGv_i32 var, int shift)
533 tcg_gen_andi_i32(cpu_CF, var, 1);
535 tcg_gen_shri_i32(cpu_CF, var, shift);
537 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
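/* In the immediate-shift encoding an amount of 0 only means "no shift" for
 * LSL: for the other shift types it encodes LSR #32, ASR #32 or (for the
 * rotate case) RRX, which is why the cases below treat shift == 0 specially.
 */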
542 /* Shift by immediate. Includes special handling for shift == 0. */
543 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
544 int shift, int flags)
550 shifter_out_im(var, 32 - shift);
551 tcg_gen_shli_i32(var, var, shift);
557 tcg_gen_shri_i32(cpu_CF, var, 31);
559 tcg_gen_movi_i32(var, 0);
562 shifter_out_im(var, shift - 1);
563 tcg_gen_shri_i32(var, var, shift);
570 shifter_out_im(var, shift - 1);
573 tcg_gen_sari_i32(var, var, shift);
575 case 3: /* ROR/RRX */
578 shifter_out_im(var, shift - 1);
579 tcg_gen_rotri_i32(var, var, shift); break;
581 TCGv_i32 tmp = tcg_temp_new_i32();
582 tcg_gen_shli_i32(tmp, cpu_CF, 31);
584 shifter_out_im(var, 0);
585 tcg_gen_shri_i32(var, var, 1);
586 tcg_gen_or_i32(var, var, tmp);
587 tcg_temp_free_i32(tmp);
592 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
593 TCGv_i32 shift, int flags)
597 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
598 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
599 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
600 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
605 gen_shl(var, var, shift);
608 gen_shr(var, var, shift);
611 gen_sar(var, var, shift);
613 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
614 tcg_gen_rotr_i32(var, var, shift); break;
617 tcg_temp_free_i32(shift);
620 #define PAS_OP(pfx) \
622 case 0: gen_pas_helper(glue(pfx,add16)); break; \
623 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
624 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
625 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
626 case 4: gen_pas_helper(glue(pfx,add8)); break; \
627 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
629 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
634 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
636 tmp = tcg_temp_new_ptr();
637 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
639 tcg_temp_free_ptr(tmp);
642 tmp = tcg_temp_new_ptr();
643 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
645 tcg_temp_free_ptr(tmp);
647 #undef gen_pas_helper
648 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
661 #undef gen_pas_helper
666 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
667 #define PAS_OP(pfx) \
669 case 0: gen_pas_helper(glue(pfx,add8)); break; \
670 case 1: gen_pas_helper(glue(pfx,add16)); break; \
671 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
672 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
673 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
674 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
676 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
681 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
683 tmp = tcg_temp_new_ptr();
684 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
686 tcg_temp_free_ptr(tmp);
689 tmp = tcg_temp_new_ptr();
690 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
692 tcg_temp_free_ptr(tmp);
694 #undef gen_pas_helper
695 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
708 #undef gen_pas_helper
714 * Generate a conditional branch based on ARM condition code cc.
715 * This is common between ARM and AArch64 targets.
717 void arm_gen_test_cc(int cc, int label)
724 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
727 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
730 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
733 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
736 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
739 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
742 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
745 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
747 case 8: /* hi: C && !Z */
748 inv = gen_new_label();
749 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
750 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
753 case 9: /* ls: !C || Z */
754 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
755 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
757 case 10: /* ge: N == V -> N ^ V == 0 */
758 tmp = tcg_temp_new_i32();
759 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
760 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
761 tcg_temp_free_i32(tmp);
763 case 11: /* lt: N != V -> N ^ V != 0 */
764 tmp = tcg_temp_new_i32();
765 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
766 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
767 tcg_temp_free_i32(tmp);
769 case 12: /* gt: !Z && N == V */
770 inv = gen_new_label();
771 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
772 tmp = tcg_temp_new_i32();
773 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
774 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
775 tcg_temp_free_i32(tmp);
778 case 13: /* le: Z || N != V */
779 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
780 tmp = tcg_temp_new_i32();
781 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
782 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
783 tcg_temp_free_i32(tmp);
786 fprintf(stderr, "Bad condition code 0x%x\n", cc);
791 static const uint8_t table_logic_cc[16] = {
810 /* Set PC and Thumb state from an immediate address. */
811 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
815 s->is_jmp = DISAS_UPDATE;
816 if (s->thumb != (addr & 1)) {
817 tmp = tcg_temp_new_i32();
818 tcg_gen_movi_i32(tmp, addr & 1);
819 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
820 tcg_temp_free_i32(tmp);
822 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
825 /* Set PC and Thumb state from var. var is marked as dead. */
826 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
828 s->is_jmp = DISAS_UPDATE;
829 tcg_gen_andi_i32(cpu_R[15], var, ~1);
830 tcg_gen_andi_i32(var, var, 1);
831 store_cpu_field(var, thumb);
834 /* Variant of store_reg which uses branch&exchange logic when storing
835 to r15 in ARM architecture v7 and above. The source must be a temporary
836 and will be marked as dead. */
837 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
839 if (reg == 15 && ENABLE_ARCH_7) {
842 store_reg(s, reg, var);
846 /* Variant of store_reg which uses branch&exchange logic when storing
847 * to r15 in ARM architecture v5T and above. This is used for storing
848 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
849 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
850 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
852 if (reg == 15 && ENABLE_ARCH_5) {
855 store_reg(s, reg, var);
859 /* Abstractions of "generate code to do a guest load/store for
860 * AArch32", where a vaddr is always 32 bits (and is zero
861 * extended if we're a 64 bit core) and data is also
862 * 32 bits unless specifically doing a 64 bit access.
863 * These functions work like tcg_gen_qemu_{ld,st}* except
864 * that the address argument is TCGv_i32 rather than TCGv.
866 #if TARGET_LONG_BITS == 32
868 #define DO_GEN_LD(SUFF, OPC) \
869 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
871 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
874 #define DO_GEN_ST(SUFF, OPC) \
875 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
877 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
880 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
882 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
885 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
887 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
892 #define DO_GEN_LD(SUFF, OPC) \
893 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
895 TCGv addr64 = tcg_temp_new(); \
896 tcg_gen_extu_i32_i64(addr64, addr); \
897 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
898 tcg_temp_free(addr64); \
901 #define DO_GEN_ST(SUFF, OPC) \
902 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
904 TCGv addr64 = tcg_temp_new(); \
905 tcg_gen_extu_i32_i64(addr64, addr); \
906 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
907 tcg_temp_free(addr64); \
910 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
912 TCGv addr64 = tcg_temp_new();
913 tcg_gen_extu_i32_i64(addr64, addr);
914 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
915 tcg_temp_free(addr64);
918 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
920 TCGv addr64 = tcg_temp_new();
921 tcg_gen_extu_i32_i64(addr64, addr);
922 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
923 tcg_temp_free(addr64);
930 DO_GEN_LD(16s, MO_TESW)
931 DO_GEN_LD(16u, MO_TEUW)
932 DO_GEN_LD(32u, MO_TEUL)
934 DO_GEN_ST(16, MO_TEUW)
935 DO_GEN_ST(32, MO_TEUL)
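/* Each DO_GEN_LD/DO_GEN_ST invocation above expands to a gen_aa32_ld<SUFF>()
 * or gen_aa32_st<SUFF>() wrapper, e.g. DO_GEN_LD(16u, MO_TEUW) defines
 * gen_aa32_ld16u(val, addr, index), so callers need not care whether the
 * target address type is 32 or 64 bits wide.
 */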
937 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
939 tcg_gen_movi_i32(cpu_R[15], val);
942 static inline void gen_hvc(DisasContext *s, int imm16)
944 /* The pre HVC helper handles cases when HVC gets trapped
945 * as an undefined insn by runtime configuration (i.e. before
946 * the insn really executes).
948 gen_set_pc_im(s, s->pc - 4);
949 gen_helper_pre_hvc(cpu_env);
950 /* Otherwise we will treat this as a real exception which
951 * happens after execution of the insn. (The distinction matters
952 * for the PC value reported to the exception handler and also
953 * for single stepping.)
956 gen_set_pc_im(s, s->pc);
957 s->is_jmp = DISAS_HVC;
960 static inline void gen_smc(DisasContext *s)
962 /* As with HVC, we may take an exception either before or after
963 * the insn executes.
964 */
967 gen_set_pc_im(s, s->pc - 4);
968 tmp = tcg_const_i32(syn_aa32_smc());
969 gen_helper_pre_smc(cpu_env, tmp);
970 tcg_temp_free_i32(tmp);
971 gen_set_pc_im(s, s->pc);
972 s->is_jmp = DISAS_SMC;
976 gen_set_condexec (DisasContext *s)
978 if (s->condexec_mask) {
979 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
980 TCGv_i32 tmp = tcg_temp_new_i32();
981 tcg_gen_movi_i32(tmp, val);
982 store_cpu_field(tmp, condexec_bits);
986 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
989 gen_set_pc_im(s, s->pc - offset);
990 gen_exception_internal(excp);
991 s->is_jmp = DISAS_JUMP;
994 static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
997 gen_set_pc_im(s, s->pc - offset);
998 gen_exception(excp, syn);
999 s->is_jmp = DISAS_JUMP;
1002 /* Force a TB lookup after an instruction that changes the CPU state. */
1003 static inline void gen_lookup_tb(DisasContext *s)
1005 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1006 s->is_jmp = DISAS_UPDATE;
1009 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1012 int val, rm, shift, shiftop;
1015 if (!(insn & (1 << 25))) {
1018 if (!(insn & (1 << 23)))
1021 tcg_gen_addi_i32(var, var, val);
1023 /* shift/register */
1025 shift = (insn >> 7) & 0x1f;
1026 shiftop = (insn >> 5) & 3;
1027 offset = load_reg(s, rm);
1028 gen_arm_shift_im(offset, shiftop, shift, 0);
1029 if (!(insn & (1 << 23)))
1030 tcg_gen_sub_i32(var, var, offset);
1032 tcg_gen_add_i32(var, var, offset);
1033 tcg_temp_free_i32(offset);
1037 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1038 int extra, TCGv_i32 var)
1043 if (insn & (1 << 22)) {
1045 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1046 if (!(insn & (1 << 23)))
1050 tcg_gen_addi_i32(var, var, val);
1054 tcg_gen_addi_i32(var, var, extra);
1056 offset = load_reg(s, rm);
1057 if (!(insn & (1 << 23)))
1058 tcg_gen_sub_i32(var, var, offset);
1060 tcg_gen_add_i32(var, var, offset);
1061 tcg_temp_free_i32(offset);
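/* Return a pointer to the float_status to use: Neon operations use
 * vfp.standard_fp_status (Neon always uses the "standard FPSCR" settings),
 * while other VFP operations use vfp.fp_status.
 */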
1065 static TCGv_ptr get_fpstatus_ptr(int neon)
1067 TCGv_ptr statusptr = tcg_temp_new_ptr();
1070 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1072 offset = offsetof(CPUARMState, vfp.fp_status);
1074 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1078 #define VFP_OP2(name) \
1079 static inline void gen_vfp_##name(int dp) \
1081 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1083 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1085 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1087 tcg_temp_free_ptr(fpst); \
1097 static inline void gen_vfp_F1_mul(int dp)
1099 /* Like gen_vfp_mul() but put result in F1 */
1100 TCGv_ptr fpst = get_fpstatus_ptr(0);
1102 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1104 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1106 tcg_temp_free_ptr(fpst);
1109 static inline void gen_vfp_F1_neg(int dp)
1111 /* Like gen_vfp_neg() but put result in F1 */
1113 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1115 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1119 static inline void gen_vfp_abs(int dp)
1122 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1124 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1127 static inline void gen_vfp_neg(int dp)
1130 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1132 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1135 static inline void gen_vfp_sqrt(int dp)
1138 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1140 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1143 static inline void gen_vfp_cmp(int dp)
1146 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1148 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1151 static inline void gen_vfp_cmpe(int dp)
1154 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1156 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1159 static inline void gen_vfp_F1_ld0(int dp)
1162 tcg_gen_movi_i64(cpu_F1d, 0);
1164 tcg_gen_movi_i32(cpu_F1s, 0);
1167 #define VFP_GEN_ITOF(name) \
1168 static inline void gen_vfp_##name(int dp, int neon) \
1170 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1172 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1174 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1176 tcg_temp_free_ptr(statusptr); \
1183 #define VFP_GEN_FTOI(name) \
1184 static inline void gen_vfp_##name(int dp, int neon) \
1186 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1188 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1190 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1192 tcg_temp_free_ptr(statusptr); \
1201 #define VFP_GEN_FIX(name, round) \
1202 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1204 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1205 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1207 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1210 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1213 tcg_temp_free_i32(tmp_shift); \
1214 tcg_temp_free_ptr(statusptr); \
1216 VFP_GEN_FIX(tosh, _round_to_zero)
1217 VFP_GEN_FIX(tosl, _round_to_zero)
1218 VFP_GEN_FIX(touh, _round_to_zero)
1219 VFP_GEN_FIX(toul, _round_to_zero)
1226 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1229 gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
1231 gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
1235 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1238 gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
1240 gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
1245 vfp_reg_offset (int dp, int reg)
1248 return offsetof(CPUARMState, vfp.regs[reg]);
1250 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1251 + offsetof(CPU_DoubleU, l.upper);
1253 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1254 + offsetof(CPU_DoubleU, l.lower);
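/* Single-precision registers s<2n> and s<2n+1> occupy the low and high 32-bit
 * halves of d<n>, hence the regs[reg >> 1] indexing plus an upper/lower word
 * offset above.
 */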
1258 /* Return the offset of a 32-bit piece of a NEON register.
1259 Zero is the least significant end of the register. */
1261 neon_reg_offset (int reg, int n)
1265 return vfp_reg_offset(0, sreg);
1268 static TCGv_i32 neon_load_reg(int reg, int pass)
1270 TCGv_i32 tmp = tcg_temp_new_i32();
1271 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1275 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1277 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1278 tcg_temp_free_i32(var);
1281 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1283 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1286 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1288 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1291 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1292 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1293 #define tcg_gen_st_f32 tcg_gen_st_i32
1294 #define tcg_gen_st_f64 tcg_gen_st_i64
1296 static inline void gen_mov_F0_vreg(int dp, int reg)
1299 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1301 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1304 static inline void gen_mov_F1_vreg(int dp, int reg)
1307 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1309 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1312 static inline void gen_mov_vreg_F0(int dp, int reg)
1315 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1317 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1320 #define ARM_CP_RW_BIT (1 << 20)
1322 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1324 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1327 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1329 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1332 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1334 TCGv_i32 var = tcg_temp_new_i32();
1335 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1339 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1341 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1342 tcg_temp_free_i32(var);
1345 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1347 iwmmxt_store_reg(cpu_M0, rn);
1350 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1352 iwmmxt_load_reg(cpu_M0, rn);
1355 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1357 iwmmxt_load_reg(cpu_V1, rn);
1358 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1361 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1363 iwmmxt_load_reg(cpu_V1, rn);
1364 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1367 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1369 iwmmxt_load_reg(cpu_V1, rn);
1370 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1373 #define IWMMXT_OP(name) \
1374 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1376 iwmmxt_load_reg(cpu_V1, rn); \
1377 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1380 #define IWMMXT_OP_ENV(name) \
1381 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1383 iwmmxt_load_reg(cpu_V1, rn); \
1384 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1387 #define IWMMXT_OP_ENV_SIZE(name) \
1388 IWMMXT_OP_ENV(name##b) \
1389 IWMMXT_OP_ENV(name##w) \
1390 IWMMXT_OP_ENV(name##l)
1392 #define IWMMXT_OP_ENV1(name) \
1393 static inline void gen_op_iwmmxt_##name##_M0(void) \
1395 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1409 IWMMXT_OP_ENV_SIZE(unpackl)
1410 IWMMXT_OP_ENV_SIZE(unpackh)
1412 IWMMXT_OP_ENV1(unpacklub)
1413 IWMMXT_OP_ENV1(unpackluw)
1414 IWMMXT_OP_ENV1(unpacklul)
1415 IWMMXT_OP_ENV1(unpackhub)
1416 IWMMXT_OP_ENV1(unpackhuw)
1417 IWMMXT_OP_ENV1(unpackhul)
1418 IWMMXT_OP_ENV1(unpacklsb)
1419 IWMMXT_OP_ENV1(unpacklsw)
1420 IWMMXT_OP_ENV1(unpacklsl)
1421 IWMMXT_OP_ENV1(unpackhsb)
1422 IWMMXT_OP_ENV1(unpackhsw)
1423 IWMMXT_OP_ENV1(unpackhsl)
1425 IWMMXT_OP_ENV_SIZE(cmpeq)
1426 IWMMXT_OP_ENV_SIZE(cmpgtu)
1427 IWMMXT_OP_ENV_SIZE(cmpgts)
1429 IWMMXT_OP_ENV_SIZE(mins)
1430 IWMMXT_OP_ENV_SIZE(minu)
1431 IWMMXT_OP_ENV_SIZE(maxs)
1432 IWMMXT_OP_ENV_SIZE(maxu)
1434 IWMMXT_OP_ENV_SIZE(subn)
1435 IWMMXT_OP_ENV_SIZE(addn)
1436 IWMMXT_OP_ENV_SIZE(subu)
1437 IWMMXT_OP_ENV_SIZE(addu)
1438 IWMMXT_OP_ENV_SIZE(subs)
1439 IWMMXT_OP_ENV_SIZE(adds)
1441 IWMMXT_OP_ENV(avgb0)
1442 IWMMXT_OP_ENV(avgb1)
1443 IWMMXT_OP_ENV(avgw0)
1444 IWMMXT_OP_ENV(avgw1)
1446 IWMMXT_OP_ENV(packuw)
1447 IWMMXT_OP_ENV(packul)
1448 IWMMXT_OP_ENV(packuq)
1449 IWMMXT_OP_ENV(packsw)
1450 IWMMXT_OP_ENV(packsl)
1451 IWMMXT_OP_ENV(packsq)
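/* The two helpers below record pending updates in the wCon control register:
 * set_mup sets bit 1 and set_cup sets bit 0 (presumably the iwMMXt MUp and
 * CUp "register file updated" flags).
 */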
1453 static void gen_op_iwmmxt_set_mup(void)
1456 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1457 tcg_gen_ori_i32(tmp, tmp, 2);
1458 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1461 static void gen_op_iwmmxt_set_cup(void)
1464 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1465 tcg_gen_ori_i32(tmp, tmp, 1);
1466 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1469 static void gen_op_iwmmxt_setpsr_nz(void)
1471 TCGv_i32 tmp = tcg_temp_new_i32();
1472 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1473 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1476 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1478 iwmmxt_load_reg(cpu_V1, rn);
1479 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1480 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
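/* Decode the addressing mode of an iwMMXt load/store and leave the effective
 * address in dest: bit 24 selects pre- vs post-indexed, bit 23 add vs
 * subtract, bit 21 base writeback, and the 8-bit immediate is scaled by 1 or
 * 4 depending on bit 8.  Returns nonzero for an invalid addressing mode.
 */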
1483 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1490 rd = (insn >> 16) & 0xf;
1491 tmp = load_reg(s, rd);
1493 offset = (insn & 0xff) << ((insn >> 7) & 2);
1494 if (insn & (1 << 24)) {
1496 if (insn & (1 << 23))
1497 tcg_gen_addi_i32(tmp, tmp, offset);
1499 tcg_gen_addi_i32(tmp, tmp, -offset);
1500 tcg_gen_mov_i32(dest, tmp);
1501 if (insn & (1 << 21))
1502 store_reg(s, rd, tmp);
1504 tcg_temp_free_i32(tmp);
1505 } else if (insn & (1 << 21)) {
1507 tcg_gen_mov_i32(dest, tmp);
1508 if (insn & (1 << 23))
1509 tcg_gen_addi_i32(tmp, tmp, offset);
1511 tcg_gen_addi_i32(tmp, tmp, -offset);
1512 store_reg(s, rd, tmp);
1513 } else if (!(insn & (1 << 23)))
1518 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1520 int rd = (insn >> 0) & 0xf;
1523 if (insn & (1 << 8)) {
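/* With bit 8 set the shift amount comes from a wCGR control register (only
 * wCGR0..wCGR3 are valid); otherwise it is taken from the low 32 bits of
 * data register wRd, then masked below.
 */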
1524 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1527 tmp = iwmmxt_load_creg(rd);
1530 tmp = tcg_temp_new_i32();
1531 iwmmxt_load_reg(cpu_V0, rd);
1532 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1534 tcg_gen_andi_i32(tmp, tmp, mask);
1535 tcg_gen_mov_i32(dest, tmp);
1536 tcg_temp_free_i32(tmp);
1540 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1541 (i.e. an undefined instruction). */
1542 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1545 int rdhi, rdlo, rd0, rd1, i;
1547 TCGv_i32 tmp, tmp2, tmp3;
1549 if ((insn & 0x0e000e00) == 0x0c000000) {
1550 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1552 rdlo = (insn >> 12) & 0xf;
1553 rdhi = (insn >> 16) & 0xf;
1554 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1555 iwmmxt_load_reg(cpu_V0, wrd);
1556 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1557 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1558 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1559 } else { /* TMCRR */
1560 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1561 iwmmxt_store_reg(cpu_V0, wrd);
1562 gen_op_iwmmxt_set_mup();
1567 wrd = (insn >> 12) & 0xf;
1568 addr = tcg_temp_new_i32();
1569 if (gen_iwmmxt_address(s, insn, addr)) {
1570 tcg_temp_free_i32(addr);
1573 if (insn & ARM_CP_RW_BIT) {
1574 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1575 tmp = tcg_temp_new_i32();
1576 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1577 iwmmxt_store_creg(wrd, tmp);
1580 if (insn & (1 << 8)) {
1581 if (insn & (1 << 22)) { /* WLDRD */
1582 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
1584 } else { /* WLDRW wRd */
1585 tmp = tcg_temp_new_i32();
1586 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1589 tmp = tcg_temp_new_i32();
1590 if (insn & (1 << 22)) { /* WLDRH */
1591 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
1592 } else { /* WLDRB */
1593 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
1597 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1598 tcg_temp_free_i32(tmp);
1600 gen_op_iwmmxt_movq_wRn_M0(wrd);
1603 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1604 tmp = iwmmxt_load_creg(wrd);
1605 gen_aa32_st32(tmp, addr, get_mem_index(s));
1607 gen_op_iwmmxt_movq_M0_wRn(wrd);
1608 tmp = tcg_temp_new_i32();
1609 if (insn & (1 << 8)) {
1610 if (insn & (1 << 22)) { /* WSTRD */
1611 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
1612 } else { /* WSTRW wRd */
1613 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1614 gen_aa32_st32(tmp, addr, get_mem_index(s));
1617 if (insn & (1 << 22)) { /* WSTRH */
1618 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1619 gen_aa32_st16(tmp, addr, get_mem_index(s));
1620 } else { /* WSTRB */
1621 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1622 gen_aa32_st8(tmp, addr, get_mem_index(s));
1626 tcg_temp_free_i32(tmp);
1628 tcg_temp_free_i32(addr);
1632 if ((insn & 0x0f000000) != 0x0e000000)
1635 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1636 case 0x000: /* WOR */
1637 wrd = (insn >> 12) & 0xf;
1638 rd0 = (insn >> 0) & 0xf;
1639 rd1 = (insn >> 16) & 0xf;
1640 gen_op_iwmmxt_movq_M0_wRn(rd0);
1641 gen_op_iwmmxt_orq_M0_wRn(rd1);
1642 gen_op_iwmmxt_setpsr_nz();
1643 gen_op_iwmmxt_movq_wRn_M0(wrd);
1644 gen_op_iwmmxt_set_mup();
1645 gen_op_iwmmxt_set_cup();
1647 case 0x011: /* TMCR */
1650 rd = (insn >> 12) & 0xf;
1651 wrd = (insn >> 16) & 0xf;
1653 case ARM_IWMMXT_wCID:
1654 case ARM_IWMMXT_wCASF:
1656 case ARM_IWMMXT_wCon:
1657 gen_op_iwmmxt_set_cup();
1659 case ARM_IWMMXT_wCSSF:
1660 tmp = iwmmxt_load_creg(wrd);
1661 tmp2 = load_reg(s, rd);
1662 tcg_gen_andc_i32(tmp, tmp, tmp2);
1663 tcg_temp_free_i32(tmp2);
1664 iwmmxt_store_creg(wrd, tmp);
1666 case ARM_IWMMXT_wCGR0:
1667 case ARM_IWMMXT_wCGR1:
1668 case ARM_IWMMXT_wCGR2:
1669 case ARM_IWMMXT_wCGR3:
1670 gen_op_iwmmxt_set_cup();
1671 tmp = load_reg(s, rd);
1672 iwmmxt_store_creg(wrd, tmp);
1678 case 0x100: /* WXOR */
1679 wrd = (insn >> 12) & 0xf;
1680 rd0 = (insn >> 0) & 0xf;
1681 rd1 = (insn >> 16) & 0xf;
1682 gen_op_iwmmxt_movq_M0_wRn(rd0);
1683 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1684 gen_op_iwmmxt_setpsr_nz();
1685 gen_op_iwmmxt_movq_wRn_M0(wrd);
1686 gen_op_iwmmxt_set_mup();
1687 gen_op_iwmmxt_set_cup();
1689 case 0x111: /* TMRC */
1692 rd = (insn >> 12) & 0xf;
1693 wrd = (insn >> 16) & 0xf;
1694 tmp = iwmmxt_load_creg(wrd);
1695 store_reg(s, rd, tmp);
1697 case 0x300: /* WANDN */
1698 wrd = (insn >> 12) & 0xf;
1699 rd0 = (insn >> 0) & 0xf;
1700 rd1 = (insn >> 16) & 0xf;
1701 gen_op_iwmmxt_movq_M0_wRn(rd0);
1702 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1703 gen_op_iwmmxt_andq_M0_wRn(rd1);
1704 gen_op_iwmmxt_setpsr_nz();
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 gen_op_iwmmxt_set_cup();
1709 case 0x200: /* WAND */
1710 wrd = (insn >> 12) & 0xf;
1711 rd0 = (insn >> 0) & 0xf;
1712 rd1 = (insn >> 16) & 0xf;
1713 gen_op_iwmmxt_movq_M0_wRn(rd0);
1714 gen_op_iwmmxt_andq_M0_wRn(rd1);
1715 gen_op_iwmmxt_setpsr_nz();
1716 gen_op_iwmmxt_movq_wRn_M0(wrd);
1717 gen_op_iwmmxt_set_mup();
1718 gen_op_iwmmxt_set_cup();
1720 case 0x810: case 0xa10: /* WMADD */
1721 wrd = (insn >> 12) & 0xf;
1722 rd0 = (insn >> 0) & 0xf;
1723 rd1 = (insn >> 16) & 0xf;
1724 gen_op_iwmmxt_movq_M0_wRn(rd0);
1725 if (insn & (1 << 21))
1726 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1728 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1729 gen_op_iwmmxt_movq_wRn_M0(wrd);
1730 gen_op_iwmmxt_set_mup();
1732 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1733 wrd = (insn >> 12) & 0xf;
1734 rd0 = (insn >> 16) & 0xf;
1735 rd1 = (insn >> 0) & 0xf;
1736 gen_op_iwmmxt_movq_M0_wRn(rd0);
1737 switch ((insn >> 22) & 3) {
1739 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1742 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1745 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1750 gen_op_iwmmxt_movq_wRn_M0(wrd);
1751 gen_op_iwmmxt_set_mup();
1752 gen_op_iwmmxt_set_cup();
1754 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1755 wrd = (insn >> 12) & 0xf;
1756 rd0 = (insn >> 16) & 0xf;
1757 rd1 = (insn >> 0) & 0xf;
1758 gen_op_iwmmxt_movq_M0_wRn(rd0);
1759 switch ((insn >> 22) & 3) {
1761 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1764 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1767 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1772 gen_op_iwmmxt_movq_wRn_M0(wrd);
1773 gen_op_iwmmxt_set_mup();
1774 gen_op_iwmmxt_set_cup();
1776 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1777 wrd = (insn >> 12) & 0xf;
1778 rd0 = (insn >> 16) & 0xf;
1779 rd1 = (insn >> 0) & 0xf;
1780 gen_op_iwmmxt_movq_M0_wRn(rd0);
1781 if (insn & (1 << 22))
1782 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1784 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1785 if (!(insn & (1 << 20)))
1786 gen_op_iwmmxt_addl_M0_wRn(wrd);
1787 gen_op_iwmmxt_movq_wRn_M0(wrd);
1788 gen_op_iwmmxt_set_mup();
1790 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1791 wrd = (insn >> 12) & 0xf;
1792 rd0 = (insn >> 16) & 0xf;
1793 rd1 = (insn >> 0) & 0xf;
1794 gen_op_iwmmxt_movq_M0_wRn(rd0);
1795 if (insn & (1 << 21)) {
1796 if (insn & (1 << 20))
1797 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1799 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1801 if (insn & (1 << 20))
1802 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1804 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1809 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 rd1 = (insn >> 0) & 0xf;
1813 gen_op_iwmmxt_movq_M0_wRn(rd0);
1814 if (insn & (1 << 21))
1815 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1817 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1818 if (!(insn & (1 << 20))) {
1819 iwmmxt_load_reg(cpu_V1, wrd);
1820 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1822 gen_op_iwmmxt_movq_wRn_M0(wrd);
1823 gen_op_iwmmxt_set_mup();
1825 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1826 wrd = (insn >> 12) & 0xf;
1827 rd0 = (insn >> 16) & 0xf;
1828 rd1 = (insn >> 0) & 0xf;
1829 gen_op_iwmmxt_movq_M0_wRn(rd0);
1830 switch ((insn >> 22) & 3) {
1832 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1835 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1838 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1844 gen_op_iwmmxt_set_mup();
1845 gen_op_iwmmxt_set_cup();
1847 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1848 wrd = (insn >> 12) & 0xf;
1849 rd0 = (insn >> 16) & 0xf;
1850 rd1 = (insn >> 0) & 0xf;
1851 gen_op_iwmmxt_movq_M0_wRn(rd0);
1852 if (insn & (1 << 22)) {
1853 if (insn & (1 << 20))
1854 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1856 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1858 if (insn & (1 << 20))
1859 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1861 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1863 gen_op_iwmmxt_movq_wRn_M0(wrd);
1864 gen_op_iwmmxt_set_mup();
1865 gen_op_iwmmxt_set_cup();
1867 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1868 wrd = (insn >> 12) & 0xf;
1869 rd0 = (insn >> 16) & 0xf;
1870 rd1 = (insn >> 0) & 0xf;
1871 gen_op_iwmmxt_movq_M0_wRn(rd0);
1872 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1873 tcg_gen_andi_i32(tmp, tmp, 7);
1874 iwmmxt_load_reg(cpu_V1, rd1);
1875 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1876 tcg_temp_free_i32(tmp);
1877 gen_op_iwmmxt_movq_wRn_M0(wrd);
1878 gen_op_iwmmxt_set_mup();
1880 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1881 if (((insn >> 6) & 3) == 3)
1883 rd = (insn >> 12) & 0xf;
1884 wrd = (insn >> 16) & 0xf;
1885 tmp = load_reg(s, rd);
1886 gen_op_iwmmxt_movq_M0_wRn(wrd);
1887 switch ((insn >> 6) & 3) {
1889 tmp2 = tcg_const_i32(0xff);
1890 tmp3 = tcg_const_i32((insn & 7) << 3);
1893 tmp2 = tcg_const_i32(0xffff);
1894 tmp3 = tcg_const_i32((insn & 3) << 4);
1897 tmp2 = tcg_const_i32(0xffffffff);
1898 tmp3 = tcg_const_i32((insn & 1) << 5);
1901 TCGV_UNUSED_I32(tmp2);
1902 TCGV_UNUSED_I32(tmp3);
1904 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1905 tcg_temp_free_i32(tmp3);
1906 tcg_temp_free_i32(tmp2);
1907 tcg_temp_free_i32(tmp);
1908 gen_op_iwmmxt_movq_wRn_M0(wrd);
1909 gen_op_iwmmxt_set_mup();
1911 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1912 rd = (insn >> 12) & 0xf;
1913 wrd = (insn >> 16) & 0xf;
1914 if (rd == 15 || ((insn >> 22) & 3) == 3)
1916 gen_op_iwmmxt_movq_M0_wRn(wrd);
1917 tmp = tcg_temp_new_i32();
1918 switch ((insn >> 22) & 3) {
1920 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1921 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1923 tcg_gen_ext8s_i32(tmp, tmp);
1925 tcg_gen_andi_i32(tmp, tmp, 0xff);
1929 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1930 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1932 tcg_gen_ext16s_i32(tmp, tmp);
1934 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1938 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1939 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1942 store_reg(s, rd, tmp);
1944 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1945 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1947 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1948 switch ((insn >> 22) & 3) {
1950 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1953 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1956 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1959 tcg_gen_shli_i32(tmp, tmp, 28);
1961 tcg_temp_free_i32(tmp);
1963 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1964 if (((insn >> 6) & 3) == 3)
1966 rd = (insn >> 12) & 0xf;
1967 wrd = (insn >> 16) & 0xf;
1968 tmp = load_reg(s, rd);
1969 switch ((insn >> 6) & 3) {
1971 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1974 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1977 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1980 tcg_temp_free_i32(tmp);
1981 gen_op_iwmmxt_movq_wRn_M0(wrd);
1982 gen_op_iwmmxt_set_mup();
1984 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1985 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1987 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1988 tmp2 = tcg_temp_new_i32();
1989 tcg_gen_mov_i32(tmp2, tmp);
1990 switch ((insn >> 22) & 3) {
1992 for (i = 0; i < 7; i ++) {
1993 tcg_gen_shli_i32(tmp2, tmp2, 4);
1994 tcg_gen_and_i32(tmp, tmp, tmp2);
1998 for (i = 0; i < 3; i ++) {
1999 tcg_gen_shli_i32(tmp2, tmp2, 8);
2000 tcg_gen_and_i32(tmp, tmp, tmp2);
2004 tcg_gen_shli_i32(tmp2, tmp2, 16);
2005 tcg_gen_and_i32(tmp, tmp, tmp2);
2009 tcg_temp_free_i32(tmp2);
2010 tcg_temp_free_i32(tmp);
2012 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2013 wrd = (insn >> 12) & 0xf;
2014 rd0 = (insn >> 16) & 0xf;
2015 gen_op_iwmmxt_movq_M0_wRn(rd0);
2016 switch ((insn >> 22) & 3) {
2018 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2021 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2024 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2029 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 gen_op_iwmmxt_set_mup();
2032 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2033 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2035 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2036 tmp2 = tcg_temp_new_i32();
2037 tcg_gen_mov_i32(tmp2, tmp);
2038 switch ((insn >> 22) & 3) {
2040 for (i = 0; i < 7; i ++) {
2041 tcg_gen_shli_i32(tmp2, tmp2, 4);
2042 tcg_gen_or_i32(tmp, tmp, tmp2);
2046 for (i = 0; i < 3; i ++) {
2047 tcg_gen_shli_i32(tmp2, tmp2, 8);
2048 tcg_gen_or_i32(tmp, tmp, tmp2);
2052 tcg_gen_shli_i32(tmp2, tmp2, 16);
2053 tcg_gen_or_i32(tmp, tmp, tmp2);
2057 tcg_temp_free_i32(tmp2);
2058 tcg_temp_free_i32(tmp);
2060 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2061 rd = (insn >> 12) & 0xf;
2062 rd0 = (insn >> 16) & 0xf;
2063 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2065 gen_op_iwmmxt_movq_M0_wRn(rd0);
2066 tmp = tcg_temp_new_i32();
2067 switch ((insn >> 22) & 3) {
2069 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2072 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2075 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2078 store_reg(s, rd, tmp);
2080 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2081 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2082 wrd = (insn >> 12) & 0xf;
2083 rd0 = (insn >> 16) & 0xf;
2084 rd1 = (insn >> 0) & 0xf;
2085 gen_op_iwmmxt_movq_M0_wRn(rd0);
2086 switch ((insn >> 22) & 3) {
2088 if (insn & (1 << 21))
2089 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2091 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2097 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2103 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2108 gen_op_iwmmxt_movq_wRn_M0(wrd);
2109 gen_op_iwmmxt_set_mup();
2110 gen_op_iwmmxt_set_cup();
2112 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2113 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2114 wrd = (insn >> 12) & 0xf;
2115 rd0 = (insn >> 16) & 0xf;
2116 gen_op_iwmmxt_movq_M0_wRn(rd0);
2117 switch ((insn >> 22) & 3) {
2119 if (insn & (1 << 21))
2120 gen_op_iwmmxt_unpacklsb_M0();
2122 gen_op_iwmmxt_unpacklub_M0();
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_unpacklsw_M0();
2128 gen_op_iwmmxt_unpackluw_M0();
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_unpacklsl_M0();
2134 gen_op_iwmmxt_unpacklul_M0();
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2143 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2144 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 switch ((insn >> 22) & 3) {
2150 if (insn & (1 << 21))
2151 gen_op_iwmmxt_unpackhsb_M0();
2153 gen_op_iwmmxt_unpackhub_M0();
2156 if (insn & (1 << 21))
2157 gen_op_iwmmxt_unpackhsw_M0();
2159 gen_op_iwmmxt_unpackhuw_M0();
2162 if (insn & (1 << 21))
2163 gen_op_iwmmxt_unpackhsl_M0();
2165 gen_op_iwmmxt_unpackhul_M0();
2170 gen_op_iwmmxt_movq_wRn_M0(wrd);
2171 gen_op_iwmmxt_set_mup();
2172 gen_op_iwmmxt_set_cup();
2174 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2175 case 0x214: case 0x614: case 0xa14: case 0xe14:
2176 if (((insn >> 22) & 3) == 0)
2178 wrd = (insn >> 12) & 0xf;
2179 rd0 = (insn >> 16) & 0xf;
2180 gen_op_iwmmxt_movq_M0_wRn(rd0);
2181 tmp = tcg_temp_new_i32();
2182 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2183 tcg_temp_free_i32(tmp);
2186 switch ((insn >> 22) & 3) {
2188 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2191 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2194 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2197 tcg_temp_free_i32(tmp);
2198 gen_op_iwmmxt_movq_wRn_M0(wrd);
2199 gen_op_iwmmxt_set_mup();
2200 gen_op_iwmmxt_set_cup();
2202 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2203 case 0x014: case 0x414: case 0x814: case 0xc14:
2204 if (((insn >> 22) & 3) == 0)
2206 wrd = (insn >> 12) & 0xf;
2207 rd0 = (insn >> 16) & 0xf;
2208 gen_op_iwmmxt_movq_M0_wRn(rd0);
2209 tmp = tcg_temp_new_i32();
2210 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2211 tcg_temp_free_i32(tmp);
2214 switch ((insn >> 22) & 3) {
2216 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2219 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2222 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2225 tcg_temp_free_i32(tmp);
2226 gen_op_iwmmxt_movq_wRn_M0(wrd);
2227 gen_op_iwmmxt_set_mup();
2228 gen_op_iwmmxt_set_cup();
2230 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2231 case 0x114: case 0x514: case 0x914: case 0xd14:
2232 if (((insn >> 22) & 3) == 0)
2234 wrd = (insn >> 12) & 0xf;
2235 rd0 = (insn >> 16) & 0xf;
2236 gen_op_iwmmxt_movq_M0_wRn(rd0);
2237 tmp = tcg_temp_new_i32();
2238 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2239 tcg_temp_free_i32(tmp);
2242 switch ((insn >> 22) & 3) {
2244 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2247 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2250 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2253 tcg_temp_free_i32(tmp);
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2258 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2259 case 0x314: case 0x714: case 0xb14: case 0xf14:
2260 if (((insn >> 22) & 3) == 0)
2262 wrd = (insn >> 12) & 0xf;
2263 rd0 = (insn >> 16) & 0xf;
2264 gen_op_iwmmxt_movq_M0_wRn(rd0);
2265 tmp = tcg_temp_new_i32();
2266 switch ((insn >> 22) & 3) {
2268 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2269 tcg_temp_free_i32(tmp);
2272 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2275 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2276 tcg_temp_free_i32(tmp);
2279 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2282 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2283 tcg_temp_free_i32(tmp);
2286 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2289 tcg_temp_free_i32(tmp);
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2294 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2295 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2296 wrd = (insn >> 12) & 0xf;
2297 rd0 = (insn >> 16) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 gen_op_iwmmxt_movq_M0_wRn(rd0);
2300 switch ((insn >> 22) & 3) {
2302 if (insn & (1 << 21))
2303 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2305 gen_op_iwmmxt_minub_M0_wRn(rd1);
2308 if (insn & (1 << 21))
2309 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2311 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2314 if (insn & (1 << 21))
2315 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2317 gen_op_iwmmxt_minul_M0_wRn(rd1);
2322 gen_op_iwmmxt_movq_wRn_M0(wrd);
2323 gen_op_iwmmxt_set_mup();
2325 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2326 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2327 wrd = (insn >> 12) & 0xf;
2328 rd0 = (insn >> 16) & 0xf;
2329 rd1 = (insn >> 0) & 0xf;
2330 gen_op_iwmmxt_movq_M0_wRn(rd0);
2331 switch ((insn >> 22) & 3) {
2333 if (insn & (1 << 21))
2334 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2336 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2339 if (insn & (1 << 21))
2340 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2342 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2345 if (insn & (1 << 21))
2346 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2348 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2353 gen_op_iwmmxt_movq_wRn_M0(wrd);
2354 gen_op_iwmmxt_set_mup();
2356 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2357 case 0x402: case 0x502: case 0x602: case 0x702:
2358 wrd = (insn >> 12) & 0xf;
2359 rd0 = (insn >> 16) & 0xf;
2360 rd1 = (insn >> 0) & 0xf;
2361 gen_op_iwmmxt_movq_M0_wRn(rd0);
2362 tmp = tcg_const_i32((insn >> 20) & 3);
2363 iwmmxt_load_reg(cpu_V1, rd1);
2364 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2365 tcg_temp_free_i32(tmp);
2366 gen_op_iwmmxt_movq_wRn_M0(wrd);
2367 gen_op_iwmmxt_set_mup();
2369 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2370 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2371 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2372 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2373 wrd = (insn >> 12) & 0xf;
2374 rd0 = (insn >> 16) & 0xf;
2375 rd1 = (insn >> 0) & 0xf;
2376 gen_op_iwmmxt_movq_M0_wRn(rd0);
2377 switch ((insn >> 20) & 0xf) {
2379 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2382 gen_op_iwmmxt_subub_M0_wRn(rd1);
2385 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2388 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2391 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2394 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2397 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2400 gen_op_iwmmxt_subul_M0_wRn(rd1);
2403 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2408 gen_op_iwmmxt_movq_wRn_M0(wrd);
2409 gen_op_iwmmxt_set_mup();
2410 gen_op_iwmmxt_set_cup();
2412 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2413 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2414 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2415 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2416 wrd = (insn >> 12) & 0xf;
2417 rd0 = (insn >> 16) & 0xf;
2418 gen_op_iwmmxt_movq_M0_wRn(rd0);
2419 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2420 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2421 tcg_temp_free_i32(tmp);
2422 gen_op_iwmmxt_movq_wRn_M0(wrd);
2423 gen_op_iwmmxt_set_mup();
2424 gen_op_iwmmxt_set_cup();
2426 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2427 case 0x418: case 0x518: case 0x618: case 0x718:
2428 case 0x818: case 0x918: case 0xa18: case 0xb18:
2429 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2430 wrd = (insn >> 12) & 0xf;
2431 rd0 = (insn >> 16) & 0xf;
2432 rd1 = (insn >> 0) & 0xf;
2433 gen_op_iwmmxt_movq_M0_wRn(rd0);
2434 switch ((insn >> 20) & 0xf) {
2436 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2439 gen_op_iwmmxt_addub_M0_wRn(rd1);
2442 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2445 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2448 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2451 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2454 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2457 gen_op_iwmmxt_addul_M0_wRn(rd1);
2460 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2465 gen_op_iwmmxt_movq_wRn_M0(wrd);
2466 gen_op_iwmmxt_set_mup();
2467 gen_op_iwmmxt_set_cup();
2469 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2470 case 0x408: case 0x508: case 0x608: case 0x708:
2471 case 0x808: case 0x908: case 0xa08: case 0xb08:
2472 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2473 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2475 wrd = (insn >> 12) & 0xf;
2476 rd0 = (insn >> 16) & 0xf;
2477 rd1 = (insn >> 0) & 0xf;
2478 gen_op_iwmmxt_movq_M0_wRn(rd0);
2479 switch ((insn >> 22) & 3) {
2481 if (insn & (1 << 21))
2482 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2484 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2487 if (insn & (1 << 21))
2488 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2490 gen_op_iwmmxt_packul_M0_wRn(rd1);
2493 if (insn & (1 << 21))
2494 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2496 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2499 gen_op_iwmmxt_movq_wRn_M0(wrd);
2500 gen_op_iwmmxt_set_mup();
2501 gen_op_iwmmxt_set_cup();
2503 case 0x201: case 0x203: case 0x205: case 0x207:
2504 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2505 case 0x211: case 0x213: case 0x215: case 0x217:
2506 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2507 wrd = (insn >> 5) & 0xf;
2508 rd0 = (insn >> 12) & 0xf;
2509 rd1 = (insn >> 0) & 0xf;
2510 if (rd0 == 0xf || rd1 == 0xf)
2512 gen_op_iwmmxt_movq_M0_wRn(wrd);
2513 tmp = load_reg(s, rd0);
2514 tmp2 = load_reg(s, rd1);
2515 switch ((insn >> 16) & 0xf) {
2516 case 0x0: /* TMIA */
2517 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2519 case 0x8: /* TMIAPH */
2520 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2522 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2523 if (insn & (1 << 16))
2524 tcg_gen_shri_i32(tmp, tmp, 16);
2525 if (insn & (1 << 17))
2526 tcg_gen_shri_i32(tmp2, tmp2, 16);
2527 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2530 tcg_temp_free_i32(tmp2);
2531 tcg_temp_free_i32(tmp);
2534 tcg_temp_free_i32(tmp2);
2535 tcg_temp_free_i32(tmp);
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2546 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2547 (i.e. an undefined instruction). */
2548 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2550 int acc, rd0, rd1, rdhi, rdlo;
2553 if ((insn & 0x0ff00f10) == 0x0e200010) {
2554 /* Multiply with Internal Accumulate Format */
2555 rd0 = (insn >> 12) & 0xf;
2557 acc = (insn >> 5) & 7;
2562 tmp = load_reg(s, rd0);
2563 tmp2 = load_reg(s, rd1);
2564 switch ((insn >> 16) & 0xf) {
2566 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2568 case 0x8: /* MIAPH */
2569 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2571 case 0xc: /* MIABB */
2572 case 0xd: /* MIABT */
2573 case 0xe: /* MIATB */
2574 case 0xf: /* MIATT */
2575 if (insn & (1 << 16))
2576 tcg_gen_shri_i32(tmp, tmp, 16);
2577 if (insn & (1 << 17))
2578 tcg_gen_shri_i32(tmp2, tmp2, 16);
2579 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2584 tcg_temp_free_i32(tmp2);
2585 tcg_temp_free_i32(tmp);
2587 gen_op_iwmmxt_movq_wRn_M0(acc);
2591 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2592 /* Internal Accumulator Access Format */
2593 rdhi = (insn >> 16) & 0xf;
2594 rdlo = (insn >> 12) & 0xf;
2600 if (insn & ARM_CP_RW_BIT) { /* MRA */
2601 iwmmxt_load_reg(cpu_V0, acc);
2602 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2603 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2604 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2605 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2607 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2608 iwmmxt_store_reg(cpu_V0, acc);
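/* Decoding of VFP register numbers from the instruction word.  A
 * single-precision register number keeps its 4-bit field as bits [4:1]
 * and the extra bit as bit 0 (e.g. VFP_SREG_D builds Sd from
 * insn[15:12]:insn[22]); for double precision the extra bit becomes the
 * top bit and is only valid with VFP3 (32 D registers), otherwise the
 * VFP_DREG macro treats a set bit as an invalid encoding.
 */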
2616 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2617 #define VFP_SREG(insn, bigbit, smallbit) \
2618 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2619 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2620 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2621 reg = (((insn) >> (bigbit)) & 0x0f) \
2622 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2624 if (insn & (1 << (smallbit))) \
2626 reg = ((insn) >> (bigbit)) & 0x0f; \
2629 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2630 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2631 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2632 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2633 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2634 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2636 /* Move between integer and VFP cores. */
2637 static TCGv_i32 gen_vfp_mrs(void)
2639 TCGv_i32 tmp = tcg_temp_new_i32();
2640 tcg_gen_mov_i32(tmp, cpu_F0s);
2644 static void gen_vfp_msr(TCGv_i32 tmp)
2646 tcg_gen_mov_i32(cpu_F0s, tmp);
2647 tcg_temp_free_i32(tmp);
2650 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2652 TCGv_i32 tmp = tcg_temp_new_i32();
2654 tcg_gen_shri_i32(var, var, shift);
2655 tcg_gen_ext8u_i32(var, var);
2656 tcg_gen_shli_i32(tmp, var, 8);
2657 tcg_gen_or_i32(var, var, tmp);
2658 tcg_gen_shli_i32(tmp, var, 16);
2659 tcg_gen_or_i32(var, var, tmp);
2660 tcg_temp_free_i32(tmp);
2663 static void gen_neon_dup_low16(TCGv_i32 var)
2665 TCGv_i32 tmp = tcg_temp_new_i32();
2666 tcg_gen_ext16u_i32(var, var);
2667 tcg_gen_shli_i32(tmp, var, 16);
2668 tcg_gen_or_i32(var, var, tmp);
2669 tcg_temp_free_i32(tmp);
2672 static void gen_neon_dup_high16(TCGv_i32 var)
2674 TCGv_i32 tmp = tcg_temp_new_i32();
2675 tcg_gen_andi_i32(var, var, 0xffff0000);
2676 tcg_gen_shri_i32(tmp, var, 16);
2677 tcg_gen_or_i32(var, var, tmp);
2678 tcg_temp_free_i32(tmp);
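/* The gen_neon_dup_* helpers above broadcast one byte or halfword of 'var'
 * across the whole 32-bit value; they are used wherever a scalar must be
 * replicated into every lane, e.g. VDUP and the load-and-replicate forms
 * of VLD1..VLD4 handled by gen_load_and_replicate() below.
 */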
2681 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2683 /* Load a single Neon element and replicate it into a 32-bit TCG reg */
2684 TCGv_i32 tmp = tcg_temp_new_i32();
2687 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2688 gen_neon_dup_u8(tmp, 0);
2691 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2692 gen_neon_dup_low16(tmp);
2695 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2697 default: /* Avoid compiler warnings. */
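/* VSEL: select between Sn/Dn and Sm/Dm according to the condition encoded
 * in insn[21:20] (0 -> EQ, 1 -> VS, 2 -> GE, 3 -> GT).  The selection is
 * done with movcond on the cached ZF/NF/VF values, widened to 64 bits in
 * the double-precision case so one movcond moves the whole D register.
 */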
2703 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2706 uint32_t cc = extract32(insn, 20, 2);
2709 TCGv_i64 frn, frm, dest;
2710 TCGv_i64 tmp, zero, zf, nf, vf;
2712 zero = tcg_const_i64(0);
2714 frn = tcg_temp_new_i64();
2715 frm = tcg_temp_new_i64();
2716 dest = tcg_temp_new_i64();
2718 zf = tcg_temp_new_i64();
2719 nf = tcg_temp_new_i64();
2720 vf = tcg_temp_new_i64();
2722 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2723 tcg_gen_ext_i32_i64(nf, cpu_NF);
2724 tcg_gen_ext_i32_i64(vf, cpu_VF);
2726 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2727 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2730 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2734 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2737 case 2: /* ge: N == V -> N ^ V == 0 */
2738 tmp = tcg_temp_new_i64();
2739 tcg_gen_xor_i64(tmp, vf, nf);
2740 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2742 tcg_temp_free_i64(tmp);
2744 case 3: /* gt: !Z && N == V */
2745 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2747 tmp = tcg_temp_new_i64();
2748 tcg_gen_xor_i64(tmp, vf, nf);
2749 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2751 tcg_temp_free_i64(tmp);
2754 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2755 tcg_temp_free_i64(frn);
2756 tcg_temp_free_i64(frm);
2757 tcg_temp_free_i64(dest);
2759 tcg_temp_free_i64(zf);
2760 tcg_temp_free_i64(nf);
2761 tcg_temp_free_i64(vf);
2763 tcg_temp_free_i64(zero);
2765 TCGv_i32 frn, frm, dest;
2768 zero = tcg_const_i32(0);
2770 frn = tcg_temp_new_i32();
2771 frm = tcg_temp_new_i32();
2772 dest = tcg_temp_new_i32();
2773 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2774 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2777 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2781 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2784 case 2: /* ge: N == V -> N ^ V == 0 */
2785 tmp = tcg_temp_new_i32();
2786 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2787 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2789 tcg_temp_free_i32(tmp);
2791 case 3: /* gt: !Z && N == V */
2792 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2794 tmp = tcg_temp_new_i32();
2795 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2796 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2798 tcg_temp_free_i32(tmp);
2801 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2802 tcg_temp_free_i32(frn);
2803 tcg_temp_free_i32(frm);
2804 tcg_temp_free_i32(dest);
2806 tcg_temp_free_i32(zero);
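/* VMAXNM/VMINNM implement the IEEE 754-2008 maxNum()/minNum() operations:
 * if exactly one operand is a quiet NaN the other operand is returned.
 * That NaN handling lives in the vfp_minnum and vfp_maxnum helpers; here
 * we only pick the helper based on the 'vmin' bit (insn[6]).
 */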
2812 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2813 uint32_t rm, uint32_t dp)
2815 uint32_t vmin = extract32(insn, 6, 1);
2816 TCGv_ptr fpst = get_fpstatus_ptr(0);
2819 TCGv_i64 frn, frm, dest;
2821 frn = tcg_temp_new_i64();
2822 frm = tcg_temp_new_i64();
2823 dest = tcg_temp_new_i64();
2825 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2826 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2828 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2830 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2832 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2833 tcg_temp_free_i64(frn);
2834 tcg_temp_free_i64(frm);
2835 tcg_temp_free_i64(dest);
2837 TCGv_i32 frn, frm, dest;
2839 frn = tcg_temp_new_i32();
2840 frm = tcg_temp_new_i32();
2841 dest = tcg_temp_new_i32();
2843 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2844 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2846 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2848 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2850 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2851 tcg_temp_free_i32(frn);
2852 tcg_temp_free_i32(frm);
2853 tcg_temp_free_i32(dest);
2856 tcg_temp_free_ptr(fpst);
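/* VRINTA/VRINTN/VRINTP/VRINTM: round to integral in floating-point format
 * using the rounding mode from the instruction rather than from FPSCR.
 * gen_helper_set_rmode installs the new mode and hands back the old one
 * through tcg_rmode, so the second call afterwards restores FPSCR.
 */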
2860 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2863 TCGv_ptr fpst = get_fpstatus_ptr(0);
2866 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2867 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2872 tcg_op = tcg_temp_new_i64();
2873 tcg_res = tcg_temp_new_i64();
2874 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2875 gen_helper_rintd(tcg_res, tcg_op, fpst);
2876 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2877 tcg_temp_free_i64(tcg_op);
2878 tcg_temp_free_i64(tcg_res);
2882 tcg_op = tcg_temp_new_i32();
2883 tcg_res = tcg_temp_new_i32();
2884 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2885 gen_helper_rints(tcg_res, tcg_op, fpst);
2886 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2887 tcg_temp_free_i32(tcg_op);
2888 tcg_temp_free_i32(tcg_res);
2891 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2892 tcg_temp_free_i32(tcg_rmode);
2894 tcg_temp_free_ptr(fpst);
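/* VCVTA/VCVTN/VCVTP/VCVTM: float to integer conversion with an explicit
 * rounding mode.  The destination is always a single-precision register,
 * even for a double-precision source, so in the dp case Rd is re-encoded
 * as an S register number before the result is stored.
 */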
2898 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2901 bool is_signed = extract32(insn, 7, 1);
2902 TCGv_ptr fpst = get_fpstatus_ptr(0);
2903 TCGv_i32 tcg_rmode, tcg_shift;
2905 tcg_shift = tcg_const_i32(0);
2907 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2908 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2911 TCGv_i64 tcg_double, tcg_res;
2913 /* Rd is encoded as a single precision register even when the source
2914 * is double precision.
2916 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
2917 tcg_double = tcg_temp_new_i64();
2918 tcg_res = tcg_temp_new_i64();
2919 tcg_tmp = tcg_temp_new_i32();
2920 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
2922 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
2924 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
2926 tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
2927 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
2928 tcg_temp_free_i32(tcg_tmp);
2929 tcg_temp_free_i64(tcg_res);
2930 tcg_temp_free_i64(tcg_double);
2932 TCGv_i32 tcg_single, tcg_res;
2933 tcg_single = tcg_temp_new_i32();
2934 tcg_res = tcg_temp_new_i32();
2935 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
2937 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
2939 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
2941 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
2942 tcg_temp_free_i32(tcg_res);
2943 tcg_temp_free_i32(tcg_single);
2946 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2947 tcg_temp_free_i32(tcg_rmode);
2949 tcg_temp_free_i32(tcg_shift);
2951 tcg_temp_free_ptr(fpst);
2956 /* Table for converting the most common AArch32 encoding of
2957 * rounding mode to arm_fprounding order (which matches the
2958 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
2960 static const uint8_t fp_decode_rm[] = {
2967 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
2969 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2971 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
2976 VFP_DREG_D(rd, insn);
2977 VFP_DREG_N(rn, insn);
2978 VFP_DREG_M(rm, insn);
2980 rd = VFP_SREG_D(insn);
2981 rn = VFP_SREG_N(insn);
2982 rm = VFP_SREG_M(insn);
2985 if ((insn & 0x0f800e50) == 0x0e000a00) {
2986 return handle_vsel(insn, rd, rn, rm, dp);
2987 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
2988 return handle_vminmaxnm(insn, rd, rn, rm, dp);
2989 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
2990 /* VRINTA, VRINTN, VRINTP, VRINTM */
2991 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2992 return handle_vrint(insn, rd, rm, dp, rounding);
2993 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
2994 /* VCVTA, VCVTN, VCVTP, VCVTM */
2995 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2996 return handle_vcvt(insn, rd, rm, dp, rounding);
3001 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3002 (i.e. an undefined instruction). */
3003 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3005 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3011 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3015 /* FIXME: this access check should not take precedence over UNDEF
3016 * for invalid encodings; we will generate incorrect syndrome information
3017 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3019 if (!s->cpacr_fpen) {
3020 gen_exception_insn(s, 4, EXCP_UDEF,
3021 syn_fp_access_trap(1, 0xe, s->thumb));
3025 if (!s->vfp_enabled) {
3026 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3027 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3029 rn = (insn >> 16) & 0xf;
3030 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3031 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3036 if (extract32(insn, 28, 4) == 0xf) {
3037 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3038 * only used in v8 and above.
3040 return disas_vfp_v8_insn(s, insn);
3043 dp = ((insn & 0xf00) == 0xb00);
3044 switch ((insn >> 24) & 0xf) {
3046 if (insn & (1 << 4)) {
3047 /* single register transfer */
3048 rd = (insn >> 12) & 0xf;
3053 VFP_DREG_N(rn, insn);
3056 if (insn & 0x00c00060
3057 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3061 pass = (insn >> 21) & 1;
3062 if (insn & (1 << 22)) {
3064 offset = ((insn >> 5) & 3) * 8;
3065 } else if (insn & (1 << 5)) {
3067 offset = (insn & (1 << 6)) ? 16 : 0;
3072 if (insn & ARM_CP_RW_BIT) {
3074 tmp = neon_load_reg(rn, pass);
3078 tcg_gen_shri_i32(tmp, tmp, offset);
3079 if (insn & (1 << 23))
3085 if (insn & (1 << 23)) {
3087 tcg_gen_shri_i32(tmp, tmp, 16);
3093 tcg_gen_sari_i32(tmp, tmp, 16);
3102 store_reg(s, rd, tmp);
3105 tmp = load_reg(s, rd);
3106 if (insn & (1 << 23)) {
3109 gen_neon_dup_u8(tmp, 0);
3110 } else if (size == 1) {
3111 gen_neon_dup_low16(tmp);
3113 for (n = 0; n <= pass * 2; n++) {
3114 tmp2 = tcg_temp_new_i32();
3115 tcg_gen_mov_i32(tmp2, tmp);
3116 neon_store_reg(rn, n, tmp2);
3118 neon_store_reg(rn, n, tmp);
3123 tmp2 = neon_load_reg(rn, pass);
3124 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3125 tcg_temp_free_i32(tmp2);
3128 tmp2 = neon_load_reg(rn, pass);
3129 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3130 tcg_temp_free_i32(tmp2);
3135 neon_store_reg(rn, pass, tmp);
3139 if ((insn & 0x6f) != 0x00)
3141 rn = VFP_SREG_N(insn);
3142 if (insn & ARM_CP_RW_BIT) {
3144 if (insn & (1 << 21)) {
3145 /* system register */
3150 /* VFP2 allows access to FSID from userspace.
3151 VFP3 restricts all id registers to privileged
3154 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3157 tmp = load_cpu_field(vfp.xregs[rn]);
3162 tmp = load_cpu_field(vfp.xregs[rn]);
3164 case ARM_VFP_FPINST:
3165 case ARM_VFP_FPINST2:
3166 /* Not present in VFP3. */
3168 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3171 tmp = load_cpu_field(vfp.xregs[rn]);
3175 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3176 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3178 tmp = tcg_temp_new_i32();
3179 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3183 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3190 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3193 tmp = load_cpu_field(vfp.xregs[rn]);
3199 gen_mov_F0_vreg(0, rn);
3200 tmp = gen_vfp_mrs();
3203 /* Set the 4 flag bits in the CPSR. */
3205 tcg_temp_free_i32(tmp);
3207 store_reg(s, rd, tmp);
3211 if (insn & (1 << 21)) {
3213 /* system register */
3218 /* Writes are ignored. */
3221 tmp = load_reg(s, rd);
3222 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3223 tcg_temp_free_i32(tmp);
3229 /* TODO: VFP subarchitecture support.
3230 * For now, keep the EN bit only. */
3231 tmp = load_reg(s, rd);
3232 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3233 store_cpu_field(tmp, vfp.xregs[rn]);
3236 case ARM_VFP_FPINST:
3237 case ARM_VFP_FPINST2:
3241 tmp = load_reg(s, rd);
3242 store_cpu_field(tmp, vfp.xregs[rn]);
3248 tmp = load_reg(s, rd);
3250 gen_mov_vreg_F0(0, rn);
3255 /* data processing */
3256 /* The opcode is in bits 23, 21, 20 and 6. */
3257 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3261 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3263 /* rn is register number */
3264 VFP_DREG_N(rn, insn);
3267 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3268 ((rn & 0x1e) == 0x6))) {
3269 /* Integer or single/half precision destination. */
3270 rd = VFP_SREG_D(insn);
3272 VFP_DREG_D(rd, insn);
3275 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3276 ((rn & 0x1e) == 0x4))) {
3277 /* VCVT from int or half precision is always from S reg
3278 * regardless of dp bit. VCVT with immediate frac_bits
3279 * has the same format as SREG_M.
3281 rm = VFP_SREG_M(insn);
3283 VFP_DREG_M(rm, insn);
3286 rn = VFP_SREG_N(insn);
3287 if (op == 15 && rn == 15) {
3288 /* Double precision destination. */
3289 VFP_DREG_D(rd, insn);
3291 rd = VFP_SREG_D(insn);
3293 /* NB that we implicitly rely on the encoding for the frac_bits
3294 * in VCVT of fixed to float being the same as that of an SREG_M
3296 rm = VFP_SREG_M(insn);
3299 veclen = s->vec_len;
3300 if (op == 15 && rn > 3)
3303 /* Shut up compiler warnings. */
3314 /* Figure out what type of vector operation this is. */
3315 if ((rd & bank_mask) == 0) {
3320 delta_d = (s->vec_stride >> 1) + 1;
3322 delta_d = s->vec_stride + 1;
3324 if ((rm & bank_mask) == 0) {
3325 /* mixed scalar/vector */
3334 /* Load the initial operands. */
3339 /* Integer source */
3340 gen_mov_F0_vreg(0, rm);
3345 gen_mov_F0_vreg(dp, rd);
3346 gen_mov_F1_vreg(dp, rm);
3350 /* Compare with zero */
3351 gen_mov_F0_vreg(dp, rd);
3362 /* Source and destination the same. */
3363 gen_mov_F0_vreg(dp, rd);
3369 /* VCVTB, VCVTT: only present with the halfprec extension
3370 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3371 * (we choose to UNDEF)
3373 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3374 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3377 if (!extract32(rn, 1, 1)) {
3378 /* Half precision source. */
3379 gen_mov_F0_vreg(0, rm);
3382 /* Otherwise fall through */
3384 /* One source operand. */
3385 gen_mov_F0_vreg(dp, rm);
3389 /* Two source operands. */
3390 gen_mov_F0_vreg(dp, rn);
3391 gen_mov_F1_vreg(dp, rm);
3395 /* Perform the calculation. */
3397 case 0: /* VMLA: fd + (fn * fm) */
3398 /* Note that order of inputs to the add matters for NaNs */
3400 gen_mov_F0_vreg(dp, rd);
3403 case 1: /* VMLS: fd + -(fn * fm) */
3406 gen_mov_F0_vreg(dp, rd);
3409 case 2: /* VNMLS: -fd + (fn * fm) */
3410 /* Note that it isn't valid to replace (-A + B) with (B - A)
3411 * or similar plausible looking simplifications
3412 * because this will give wrong results for NaNs.
3415 gen_mov_F0_vreg(dp, rd);
3419 case 3: /* VNMLA: -fd + -(fn * fm) */
3422 gen_mov_F0_vreg(dp, rd);
3426 case 4: /* mul: fn * fm */
3429 case 5: /* nmul: -(fn * fm) */
3433 case 6: /* add: fn + fm */
3436 case 7: /* sub: fn - fm */
3439 case 8: /* div: fn / fm */
3442 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3443 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3444 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3445 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3446 /* These are fused multiply-add, and must be done as one
3447 * floating point operation with no rounding between the
3448 * multiplication and addition steps.
3449 * NB that doing the negations here as separate steps is
3450 * correct: an input NaN should come out with its sign bit
3451 * flipped if it is a negated input.
3453 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3461 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3463 frd = tcg_temp_new_i64();
3464 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3467 gen_helper_vfp_negd(frd, frd);
3469 fpst = get_fpstatus_ptr(0);
3470 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3471 cpu_F1d, frd, fpst);
3472 tcg_temp_free_ptr(fpst);
3473 tcg_temp_free_i64(frd);
3479 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3481 frd = tcg_temp_new_i32();
3482 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3484 gen_helper_vfp_negs(frd, frd);
3486 fpst = get_fpstatus_ptr(0);
3487 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3488 cpu_F1s, frd, fpst);
3489 tcg_temp_free_ptr(fpst);
3490 tcg_temp_free_i32(frd);
3493 case 14: /* fconst */
3494 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3498 n = (insn << 12) & 0x80000000;
3499 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3506 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3513 tcg_gen_movi_i32(cpu_F0s, n);
3516 case 15: /* extension space */
3530 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3531 tmp = gen_vfp_mrs();
3532 tcg_gen_ext16u_i32(tmp, tmp);
3534 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3537 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3540 tcg_temp_free_i32(tmp);
3542 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3543 tmp = gen_vfp_mrs();
3544 tcg_gen_shri_i32(tmp, tmp, 16);
3546 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3549 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3552 tcg_temp_free_i32(tmp);
3554 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3555 tmp = tcg_temp_new_i32();
3557 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3560 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3563 gen_mov_F0_vreg(0, rd);
3564 tmp2 = gen_vfp_mrs();
3565 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3566 tcg_gen_or_i32(tmp, tmp, tmp2);
3567 tcg_temp_free_i32(tmp2);
3570 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3571 tmp = tcg_temp_new_i32();
3573 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3576 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3579 tcg_gen_shli_i32(tmp, tmp, 16);
3580 gen_mov_F0_vreg(0, rd);
3581 tmp2 = gen_vfp_mrs();
3582 tcg_gen_ext16u_i32(tmp2, tmp2);
3583 tcg_gen_or_i32(tmp, tmp, tmp2);
3584 tcg_temp_free_i32(tmp2);
3596 case 11: /* cmpez */
3600 case 12: /* vrintr */
3602 TCGv_ptr fpst = get_fpstatus_ptr(0);
3604 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3606 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3608 tcg_temp_free_ptr(fpst);
3611 case 13: /* vrintz */
3613 TCGv_ptr fpst = get_fpstatus_ptr(0);
3615 tcg_rmode = tcg_const_i32(float_round_to_zero);
3616 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3618 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3620 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3622 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3623 tcg_temp_free_i32(tcg_rmode);
3624 tcg_temp_free_ptr(fpst);
3627 case 14: /* vrintx */
3629 TCGv_ptr fpst = get_fpstatus_ptr(0);
3631 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3633 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3635 tcg_temp_free_ptr(fpst);
3638 case 15: /* single<->double conversion */
3640 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3642 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3644 case 16: /* fuito */
3645 gen_vfp_uito(dp, 0);
3647 case 17: /* fsito */
3648 gen_vfp_sito(dp, 0);
3650 case 20: /* fshto */
3651 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3654 gen_vfp_shto(dp, 16 - rm, 0);
3656 case 21: /* fslto */
3657 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3660 gen_vfp_slto(dp, 32 - rm, 0);
3662 case 22: /* fuhto */
3663 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3666 gen_vfp_uhto(dp, 16 - rm, 0);
3668 case 23: /* fulto */
3669 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3672 gen_vfp_ulto(dp, 32 - rm, 0);
3674 case 24: /* ftoui */
3675 gen_vfp_toui(dp, 0);
3677 case 25: /* ftouiz */
3678 gen_vfp_touiz(dp, 0);
3680 case 26: /* ftosi */
3681 gen_vfp_tosi(dp, 0);
3683 case 27: /* ftosiz */
3684 gen_vfp_tosiz(dp, 0);
3686 case 28: /* ftosh */
3687 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3690 gen_vfp_tosh(dp, 16 - rm, 0);
3692 case 29: /* ftosl */
3693 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3696 gen_vfp_tosl(dp, 32 - rm, 0);
3698 case 30: /* ftouh */
3699 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3702 gen_vfp_touh(dp, 16 - rm, 0);
3704 case 31: /* ftoul */
3705 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3708 gen_vfp_toul(dp, 32 - rm, 0);
3710 default: /* undefined */
3714 default: /* undefined */
3718 /* Write back the result. */
3719 if (op == 15 && (rn >= 8 && rn <= 11)) {
3720 /* Comparison, do nothing. */
3721 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3722 (rn & 0x1e) == 0x6)) {
3723 /* VCVT double to int: always integer result.
3724 * VCVT double to half precision is always a single
3727 gen_mov_vreg_F0(0, rd);
3728 } else if (op == 15 && rn == 15) {
3730 gen_mov_vreg_F0(!dp, rd);
3732 gen_mov_vreg_F0(dp, rd);
3735 /* break out of the loop if we have finished */
3739 if (op == 15 && delta_m == 0) {
3740 /* single source one-many */
3742 rd = ((rd + delta_d) & (bank_mask - 1))
3744 gen_mov_vreg_F0(dp, rd);
3748 /* Set up the next operands. */
3750 rd = ((rd + delta_d) & (bank_mask - 1))
3754 /* One source operand. */
3755 rm = ((rm + delta_m) & (bank_mask - 1))
3757 gen_mov_F0_vreg(dp, rm);
3759 /* Two source operands. */
3760 rn = ((rn + delta_d) & (bank_mask - 1))
3762 gen_mov_F0_vreg(dp, rn);
3764 rm = ((rm + delta_m) & (bank_mask - 1))
3766 gen_mov_F1_vreg(dp, rm);
3774 if ((insn & 0x03e00000) == 0x00400000) {
3775 /* two-register transfer */
3776 rn = (insn >> 16) & 0xf;
3777 rd = (insn >> 12) & 0xf;
3779 VFP_DREG_M(rm, insn);
3781 rm = VFP_SREG_M(insn);
3784 if (insn & ARM_CP_RW_BIT) {
3787 gen_mov_F0_vreg(0, rm * 2);
3788 tmp = gen_vfp_mrs();
3789 store_reg(s, rd, tmp);
3790 gen_mov_F0_vreg(0, rm * 2 + 1);
3791 tmp = gen_vfp_mrs();
3792 store_reg(s, rn, tmp);
3794 gen_mov_F0_vreg(0, rm);
3795 tmp = gen_vfp_mrs();
3796 store_reg(s, rd, tmp);
3797 gen_mov_F0_vreg(0, rm + 1);
3798 tmp = gen_vfp_mrs();
3799 store_reg(s, rn, tmp);
3804 tmp = load_reg(s, rd);
3806 gen_mov_vreg_F0(0, rm * 2);
3807 tmp = load_reg(s, rn);
3809 gen_mov_vreg_F0(0, rm * 2 + 1);
3811 tmp = load_reg(s, rd);
3813 gen_mov_vreg_F0(0, rm);
3814 tmp = load_reg(s, rn);
3816 gen_mov_vreg_F0(0, rm + 1);
3821 rn = (insn >> 16) & 0xf;
3823 VFP_DREG_D(rd, insn);
3825 rd = VFP_SREG_D(insn);
3826 if ((insn & 0x01200000) == 0x01000000) {
3827 /* Single load/store */
3828 offset = (insn & 0xff) << 2;
3829 if ((insn & (1 << 23)) == 0)
3831 if (s->thumb && rn == 15) {
3832 /* This is actually UNPREDICTABLE */
3833 addr = tcg_temp_new_i32();
3834 tcg_gen_movi_i32(addr, s->pc & ~2);
3836 addr = load_reg(s, rn);
3838 tcg_gen_addi_i32(addr, addr, offset);
3839 if (insn & (1 << 20)) {
3840 gen_vfp_ld(s, dp, addr);
3841 gen_mov_vreg_F0(dp, rd);
3843 gen_mov_F0_vreg(dp, rd);
3844 gen_vfp_st(s, dp, addr);
3846 tcg_temp_free_i32(addr);
3848 /* load/store multiple */
3849 int w = insn & (1 << 21);
3851 n = (insn >> 1) & 0x7f;
3855 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3856 /* P == U, W == 1 => UNDEF */
3859 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3860 /* UNPREDICTABLE cases for bad immediates: we choose to
3861 * UNDEF to avoid generating huge numbers of TCG ops
3865 if (rn == 15 && w) {
3866 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3870 if (s->thumb && rn == 15) {
3871 /* This is actually UNPREDICTABLE */
3872 addr = tcg_temp_new_i32();
3873 tcg_gen_movi_i32(addr, s->pc & ~2);
3875 addr = load_reg(s, rn);
3877 if (insn & (1 << 24)) /* pre-decrement */
3878 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3884 for (i = 0; i < n; i++) {
3885 if (insn & ARM_CP_RW_BIT) {
3887 gen_vfp_ld(s, dp, addr);
3888 gen_mov_vreg_F0(dp, rd + i);
3891 gen_mov_F0_vreg(dp, rd + i);
3892 gen_vfp_st(s, dp, addr);
3894 tcg_gen_addi_i32(addr, addr, offset);
3898 if (insn & (1 << 24))
3899 offset = -offset * n;
3900 else if (dp && (insn & 1))
3906 tcg_gen_addi_i32(addr, addr, offset);
3907 store_reg(s, rn, addr);
3909 tcg_temp_free_i32(addr);
3915 /* Should never happen. */
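/* Emit code to jump to 'dest'.  Direct block chaining (exiting with a
 * pointer to the current TB) is only valid when the destination lies on
 * the same guest page as this TB; for cross-page targets we just set the
 * PC and exit to the main loop.
 */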
3921 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
3923 TranslationBlock *tb;
3926 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3928 gen_set_pc_im(s, dest);
3929 tcg_gen_exit_tb((uintptr_t)tb + n);
3931 gen_set_pc_im(s, dest);
3936 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3938 if (unlikely(s->singlestep_enabled || s->ss_active)) {
3939 /* An indirect jump so that we still trigger the debug exception. */
3944 gen_goto_tb(s, 0, dest);
3945 s->is_jmp = DISAS_TB_JUMP;
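/* 16x16 -> 32 signed multiply used by SMUL<x><y> and friends: the x/y
 * selectors pick either the top halfword (arithmetic shift right by 16)
 * or the sign-extended bottom halfword of each operand before the 32-bit
 * multiply.
 */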
3949 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
3952 tcg_gen_sari_i32(t0, t0, 16);
3956 tcg_gen_sari_i32(t1, t1, 16);
3959 tcg_gen_mul_i32(t0, t0, t1);
3962 /* Return the mask of PSR bits set by a MSR instruction. */
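/* 'flags' is the four-bit field mask from the MSR encoding: bit 0 = c
 * (CPSR[7:0]), bit 1 = x (CPSR[15:8]), bit 2 = s (CPSR[23:16]) and
 * bit 3 = f (CPSR[31:24]).  The feature checks below then clear any bits
 * that the emulated core does not implement.
 */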
3963 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
3968 if (flags & (1 << 0))
3970 if (flags & (1 << 1))
3972 if (flags & (1 << 2))
3974 if (flags & (1 << 3))
3977 /* Mask out undefined bits. */
3978 mask &= ~CPSR_RESERVED;
3979 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
3982 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
3983 mask &= ~CPSR_Q; /* V5TE in reality */
3985 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
3986 mask &= ~(CPSR_E | CPSR_GE);
3988 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
3991 /* Mask out execution state and reserved bits. */
3993 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
3995 /* Mask out privileged bits. */
4001 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4002 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4006 /* ??? This is also undefined in system mode. */
4010 tmp = load_cpu_field(spsr);
4011 tcg_gen_andi_i32(tmp, tmp, ~mask);
4012 tcg_gen_andi_i32(t0, t0, mask);
4013 tcg_gen_or_i32(tmp, tmp, t0);
4014 store_cpu_field(tmp, spsr);
4016 gen_set_cpsr(t0, mask);
4018 tcg_temp_free_i32(t0);
4023 /* Returns nonzero if access to the PSR is not permitted. */
4024 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4027 tmp = tcg_temp_new_i32();
4028 tcg_gen_movi_i32(tmp, val);
4029 return gen_set_psr(s, mask, spsr, tmp);
4032 /* Generate an old-style exception return. Marks pc as dead. */
4033 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4036 store_reg(s, 15, pc);
4037 tmp = load_cpu_field(spsr);
4038 gen_set_cpsr(tmp, CPSR_ERET_MASK);
4039 tcg_temp_free_i32(tmp);
4040 s->is_jmp = DISAS_UPDATE;
4043 /* Generate a v6 exception return. Marks both values as dead. */
4044 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4046 gen_set_cpsr(cpsr, CPSR_ERET_MASK);
4047 tcg_temp_free_i32(cpsr);
4048 store_reg(s, 15, pc);
4049 s->is_jmp = DISAS_UPDATE;
4052 static void gen_nop_hint(DisasContext *s, int val)
4056 gen_set_pc_im(s, s->pc);
4057 s->is_jmp = DISAS_WFI;
4060 gen_set_pc_im(s, s->pc);
4061 s->is_jmp = DISAS_WFE;
4065 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
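/* Shorthand for the common "cpu_V0 = cpu_V0 op cpu_V1" operand triple
 * taken by the 64-bit Neon helper calls below.
 */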
4071 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4073 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4076 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4077 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4078 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4083 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4086 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4087 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4088 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4093 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4094 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4095 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4096 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4097 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4099 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4100 switch ((size << 1) | u) { \
4102 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4105 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4108 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4111 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4114 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4117 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4119 default: return 1; \
4122 #define GEN_NEON_INTEGER_OP(name) do { \
4123 switch ((size << 1) | u) { \
4125 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4128 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4131 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4134 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4137 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4140 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4142 default: return 1; \
4145 static TCGv_i32 neon_load_scratch(int scratch)
4147 TCGv_i32 tmp = tcg_temp_new_i32();
4148 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4152 static void neon_store_scratch(int scratch, TCGv_i32 var)
4154 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4155 tcg_temp_free_i32(var);
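/* neon_load_scratch/neon_store_scratch spill 32-bit intermediate values
 * into the vfp.scratch[] slots of CPUARMState during Neon data-processing
 * translation, so source elements can be read before results are written
 * back over them.
 */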
4158 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4162 tmp = neon_load_reg(reg & 7, reg >> 4);
4164 gen_neon_dup_high16(tmp);
4166 gen_neon_dup_low16(tmp);
4169 tmp = neon_load_reg(reg & 15, reg >> 4);
4174 static int gen_neon_unzip(int rd, int rm, int size, int q)
4177 if (!q && size == 2) {
4180 tmp = tcg_const_i32(rd);
4181 tmp2 = tcg_const_i32(rm);
4185 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4188 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4191 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4199 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4202 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4208 tcg_temp_free_i32(tmp);
4209 tcg_temp_free_i32(tmp2);
4213 static int gen_neon_zip(int rd, int rm, int size, int q)
4216 if (!q && size == 2) {
4219 tmp = tcg_const_i32(rd);
4220 tmp2 = tcg_const_i32(rm);
4224 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4227 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4230 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4238 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4241 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4247 tcg_temp_free_i32(tmp);
4248 tcg_temp_free_i32(tmp2);
4252 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4256 rd = tcg_temp_new_i32();
4257 tmp = tcg_temp_new_i32();
4259 tcg_gen_shli_i32(rd, t0, 8);
4260 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4261 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4262 tcg_gen_or_i32(rd, rd, tmp);
4264 tcg_gen_shri_i32(t1, t1, 8);
4265 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4266 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4267 tcg_gen_or_i32(t1, t1, tmp);
4268 tcg_gen_mov_i32(t0, rd);
4270 tcg_temp_free_i32(tmp);
4271 tcg_temp_free_i32(rd);
4274 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4278 rd = tcg_temp_new_i32();
4279 tmp = tcg_temp_new_i32();
4281 tcg_gen_shli_i32(rd, t0, 16);
4282 tcg_gen_andi_i32(tmp, t1, 0xffff);
4283 tcg_gen_or_i32(rd, rd, tmp);
4284 tcg_gen_shri_i32(t1, t1, 16);
4285 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4286 tcg_gen_or_i32(t1, t1, tmp);
4287 tcg_gen_mov_i32(t0, rd);
4289 tcg_temp_free_i32(tmp);
4290 tcg_temp_free_i32(rd);
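/* Properties of the VLDn/VSTn "multiple structures" encodings, indexed by
 * the op field: how many D registers are transferred, the element
 * interleave factor, and the spacing between the registers.  Consumed by
 * disas_neon_ls_insn() below.
 */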
4298 } neon_ls_element_type[11] = {
4312 /* Translate a NEON load/store element instruction. Return nonzero if the
4313 instruction is invalid. */
4314 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4333 /* FIXME: this access check should not take precedence over UNDEF
4334 * for invalid encodings; we will generate incorrect syndrome information
4335 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4337 if (!s->cpacr_fpen) {
4338 gen_exception_insn(s, 4, EXCP_UDEF,
4339 syn_fp_access_trap(1, 0xe, s->thumb));
4343 if (!s->vfp_enabled)
4345 VFP_DREG_D(rd, insn);
4346 rn = (insn >> 16) & 0xf;
4348 load = (insn & (1 << 21)) != 0;
4349 if ((insn & (1 << 23)) == 0) {
4350 /* Load store all elements. */
4351 op = (insn >> 8) & 0xf;
4352 size = (insn >> 6) & 3;
4355 /* Catch UNDEF cases for bad values of align field */
4358 if (((insn >> 5) & 1) == 1) {
4363 if (((insn >> 4) & 3) == 3) {
4370 nregs = neon_ls_element_type[op].nregs;
4371 interleave = neon_ls_element_type[op].interleave;
4372 spacing = neon_ls_element_type[op].spacing;
4373 if (size == 3 && (interleave | spacing) != 1)
4375 addr = tcg_temp_new_i32();
4376 load_reg_var(s, addr, rn);
4377 stride = (1 << size) * interleave;
4378 for (reg = 0; reg < nregs; reg++) {
4379 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4380 load_reg_var(s, addr, rn);
4381 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4382 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4383 load_reg_var(s, addr, rn);
4384 tcg_gen_addi_i32(addr, addr, 1 << size);
4387 tmp64 = tcg_temp_new_i64();
4389 gen_aa32_ld64(tmp64, addr, get_mem_index(s));
4390 neon_store_reg64(tmp64, rd);
4392 neon_load_reg64(tmp64, rd);
4393 gen_aa32_st64(tmp64, addr, get_mem_index(s));
4395 tcg_temp_free_i64(tmp64);
4396 tcg_gen_addi_i32(addr, addr, stride);
4398 for (pass = 0; pass < 2; pass++) {
4401 tmp = tcg_temp_new_i32();
4402 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4403 neon_store_reg(rd, pass, tmp);
4405 tmp = neon_load_reg(rd, pass);
4406 gen_aa32_st32(tmp, addr, get_mem_index(s));
4407 tcg_temp_free_i32(tmp);
4409 tcg_gen_addi_i32(addr, addr, stride);
4410 } else if (size == 1) {
4412 tmp = tcg_temp_new_i32();
4413 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4414 tcg_gen_addi_i32(addr, addr, stride);
4415 tmp2 = tcg_temp_new_i32();
4416 gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
4417 tcg_gen_addi_i32(addr, addr, stride);
4418 tcg_gen_shli_i32(tmp2, tmp2, 16);
4419 tcg_gen_or_i32(tmp, tmp, tmp2);
4420 tcg_temp_free_i32(tmp2);
4421 neon_store_reg(rd, pass, tmp);
4423 tmp = neon_load_reg(rd, pass);
4424 tmp2 = tcg_temp_new_i32();
4425 tcg_gen_shri_i32(tmp2, tmp, 16);
4426 gen_aa32_st16(tmp, addr, get_mem_index(s));
4427 tcg_temp_free_i32(tmp);
4428 tcg_gen_addi_i32(addr, addr, stride);
4429 gen_aa32_st16(tmp2, addr, get_mem_index(s));
4430 tcg_temp_free_i32(tmp2);
4431 tcg_gen_addi_i32(addr, addr, stride);
4433 } else /* size == 0 */ {
4435 TCGV_UNUSED_I32(tmp2);
4436 for (n = 0; n < 4; n++) {
4437 tmp = tcg_temp_new_i32();
4438 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4439 tcg_gen_addi_i32(addr, addr, stride);
4443 tcg_gen_shli_i32(tmp, tmp, n * 8);
4444 tcg_gen_or_i32(tmp2, tmp2, tmp);
4445 tcg_temp_free_i32(tmp);
4448 neon_store_reg(rd, pass, tmp2);
4450 tmp2 = neon_load_reg(rd, pass);
4451 for (n = 0; n < 4; n++) {
4452 tmp = tcg_temp_new_i32();
4454 tcg_gen_mov_i32(tmp, tmp2);
4456 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4458 gen_aa32_st8(tmp, addr, get_mem_index(s));
4459 tcg_temp_free_i32(tmp);
4460 tcg_gen_addi_i32(addr, addr, stride);
4462 tcg_temp_free_i32(tmp2);
4469 tcg_temp_free_i32(addr);
4472 size = (insn >> 10) & 3;
4474 /* Load single element to all lanes. */
4475 int a = (insn >> 4) & 1;
4479 size = (insn >> 6) & 3;
4480 nregs = ((insn >> 8) & 3) + 1;
4483 if (nregs != 4 || a == 0) {
4486 /* For VLD4, size == 3 && a == 1 means 32 bits at 16-byte alignment */
4489 if (nregs == 1 && a == 1 && size == 0) {
4492 if (nregs == 3 && a == 1) {
4495 addr = tcg_temp_new_i32();
4496 load_reg_var(s, addr, rn);
4498 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4499 tmp = gen_load_and_replicate(s, addr, size);
4500 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4501 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4502 if (insn & (1 << 5)) {
4503 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4504 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4506 tcg_temp_free_i32(tmp);
4508 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4509 stride = (insn & (1 << 5)) ? 2 : 1;
4510 for (reg = 0; reg < nregs; reg++) {
4511 tmp = gen_load_and_replicate(s, addr, size);
4512 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4513 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4514 tcg_temp_free_i32(tmp);
4515 tcg_gen_addi_i32(addr, addr, 1 << size);
4519 tcg_temp_free_i32(addr);
4520 stride = (1 << size) * nregs;
4522 /* Single element. */
4523 int idx = (insn >> 4) & 0xf;
4524 pass = (insn >> 7) & 1;
4527 shift = ((insn >> 5) & 3) * 8;
4531 shift = ((insn >> 6) & 1) * 16;
4532 stride = (insn & (1 << 5)) ? 2 : 1;
4536 stride = (insn & (1 << 6)) ? 2 : 1;
4541 nregs = ((insn >> 8) & 3) + 1;
4542 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4545 if (((idx & (1 << size)) != 0) ||
4546 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4551 if ((idx & 1) != 0) {
4556 if (size == 2 && (idx & 2) != 0) {
4561 if ((size == 2) && ((idx & 3) == 3)) {
4568 if ((rd + stride * (nregs - 1)) > 31) {
4569 /* Attempts to write off the end of the register file
4570 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4571 * the neon_load_reg() would write off the end of the array.
4575 addr = tcg_temp_new_i32();
4576 load_reg_var(s, addr, rn);
4577 for (reg = 0; reg < nregs; reg++) {
4579 tmp = tcg_temp_new_i32();
4582 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4585 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4588 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4590 default: /* Avoid compiler warnings. */
4594 tmp2 = neon_load_reg(rd, pass);
4595 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4596 shift, size ? 16 : 8);
4597 tcg_temp_free_i32(tmp2);
4599 neon_store_reg(rd, pass, tmp);
4600 } else { /* Store */
4601 tmp = neon_load_reg(rd, pass);
4603 tcg_gen_shri_i32(tmp, tmp, shift);
4606 gen_aa32_st8(tmp, addr, get_mem_index(s));
4609 gen_aa32_st16(tmp, addr, get_mem_index(s));
4612 gen_aa32_st32(tmp, addr, get_mem_index(s));
4615 tcg_temp_free_i32(tmp);
4618 tcg_gen_addi_i32(addr, addr, 1 << size);
4620 tcg_temp_free_i32(addr);
4621 stride = nregs * (1 << size);
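/* Base register writeback for all the VLDn/VSTn forms: Rm == 15 means no
 * writeback, Rm == 13 means post-increment by the amount transferred, and
 * any other Rm adds that register to the base.
 */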
4627 base = load_reg(s, rn);
4629 tcg_gen_addi_i32(base, base, stride);
4632 index = load_reg(s, rm);
4633 tcg_gen_add_i32(base, base, index);
4634 tcg_temp_free_i32(index);
4636 store_reg(s, rn, base);
4641 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4642 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4644 tcg_gen_and_i32(t, t, c);
4645 tcg_gen_andc_i32(f, f, c);
4646 tcg_gen_or_i32(dest, t, f);
4649 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4652 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4653 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4654 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4659 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4662 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4663 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4664 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4669 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
4672 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4673 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4674 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4679 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4682 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4683 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4684 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4689 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
4695 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4696 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4701 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4702 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4709 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4710 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4715 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4716 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4723 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
4727 case 0: gen_helper_neon_widen_u8(dest, src); break;
4728 case 1: gen_helper_neon_widen_u16(dest, src); break;
4729 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4734 case 0: gen_helper_neon_widen_s8(dest, src); break;
4735 case 1: gen_helper_neon_widen_s16(dest, src); break;
4736 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4740 tcg_temp_free_i32(src);
4743 static inline void gen_neon_addl(int size)
4746 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4747 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4748 case 2: tcg_gen_add_i64(CPU_V001); break;
4753 static inline void gen_neon_subl(int size)
4756 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4757 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4758 case 2: tcg_gen_sub_i64(CPU_V001); break;
4763 static inline void gen_neon_negl(TCGv_i64 var, int size)
4766 case 0: gen_helper_neon_negl_u16(var, var); break;
4767 case 1: gen_helper_neon_negl_u32(var, var); break;
4769 tcg_gen_neg_i64(var, var);
4775 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4778 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4779 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4784 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4789 switch ((size << 1) | u) {
4790 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4791 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4792 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4793 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4795 tmp = gen_muls_i64_i32(a, b);
4796 tcg_gen_mov_i64(dest, tmp);
4797 tcg_temp_free_i64(tmp);
4800 tmp = gen_mulu_i64_i32(a, b);
4801 tcg_gen_mov_i64(dest, tmp);
4802 tcg_temp_free_i64(tmp);
4807 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4808 Don't forget to free them now. */
4810 tcg_temp_free_i32(a);
4811 tcg_temp_free_i32(b);
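/* Narrow each element of a 64-bit vector value into a 32-bit result,
 * optionally with signed, unsigned or signed-to-unsigned saturation, as
 * used by VMOVN/VQMOVN/VQMOVUN and the narrowing shifts.
 */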
4815 static void gen_neon_narrow_op(int op, int u, int size,
4816 TCGv_i32 dest, TCGv_i64 src)
4820 gen_neon_unarrow_sats(size, dest, src);
4822 gen_neon_narrow(size, dest, src);
4826 gen_neon_narrow_satu(size, dest, src);
4828 gen_neon_narrow_sats(size, dest, src);
4833 /* Symbolic constants for op fields for Neon 3-register same-length.
4834 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4837 #define NEON_3R_VHADD 0
4838 #define NEON_3R_VQADD 1
4839 #define NEON_3R_VRHADD 2
4840 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4841 #define NEON_3R_VHSUB 4
4842 #define NEON_3R_VQSUB 5
4843 #define NEON_3R_VCGT 6
4844 #define NEON_3R_VCGE 7
4845 #define NEON_3R_VSHL 8
4846 #define NEON_3R_VQSHL 9
4847 #define NEON_3R_VRSHL 10
4848 #define NEON_3R_VQRSHL 11
4849 #define NEON_3R_VMAX 12
4850 #define NEON_3R_VMIN 13
4851 #define NEON_3R_VABD 14
4852 #define NEON_3R_VABA 15
4853 #define NEON_3R_VADD_VSUB 16
4854 #define NEON_3R_VTST_VCEQ 17
4855 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4856 #define NEON_3R_VMUL 19
4857 #define NEON_3R_VPMAX 20
4858 #define NEON_3R_VPMIN 21
4859 #define NEON_3R_VQDMULH_VQRDMULH 22
4860 #define NEON_3R_VPADD 23
4861 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4862 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4863 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4864 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4865 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4866 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4867 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4868 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
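/* For each 3-reg-same op, a bitmask of the element sizes (1 << size) that
 * are valid encodings; ops that are absent have no bits set and so UNDEF.
 */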
4870 static const uint8_t neon_3r_sizes[] = {
4871 [NEON_3R_VHADD] = 0x7,
4872 [NEON_3R_VQADD] = 0xf,
4873 [NEON_3R_VRHADD] = 0x7,
4874 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4875 [NEON_3R_VHSUB] = 0x7,
4876 [NEON_3R_VQSUB] = 0xf,
4877 [NEON_3R_VCGT] = 0x7,
4878 [NEON_3R_VCGE] = 0x7,
4879 [NEON_3R_VSHL] = 0xf,
4880 [NEON_3R_VQSHL] = 0xf,
4881 [NEON_3R_VRSHL] = 0xf,
4882 [NEON_3R_VQRSHL] = 0xf,
4883 [NEON_3R_VMAX] = 0x7,
4884 [NEON_3R_VMIN] = 0x7,
4885 [NEON_3R_VABD] = 0x7,
4886 [NEON_3R_VABA] = 0x7,
4887 [NEON_3R_VADD_VSUB] = 0xf,
4888 [NEON_3R_VTST_VCEQ] = 0x7,
4889 [NEON_3R_VML] = 0x7,
4890 [NEON_3R_VMUL] = 0x7,
4891 [NEON_3R_VPMAX] = 0x7,
4892 [NEON_3R_VPMIN] = 0x7,
4893 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4894 [NEON_3R_VPADD] = 0x7,
4895 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
4896 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4897 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4898 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4899 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4900 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4901 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4902 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
4905 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4906 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4909 #define NEON_2RM_VREV64 0
4910 #define NEON_2RM_VREV32 1
4911 #define NEON_2RM_VREV16 2
4912 #define NEON_2RM_VPADDL 4
4913 #define NEON_2RM_VPADDL_U 5
4914 #define NEON_2RM_AESE 6 /* Includes AESD */
4915 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4916 #define NEON_2RM_VCLS 8
4917 #define NEON_2RM_VCLZ 9
4918 #define NEON_2RM_VCNT 10
4919 #define NEON_2RM_VMVN 11
4920 #define NEON_2RM_VPADAL 12
4921 #define NEON_2RM_VPADAL_U 13
4922 #define NEON_2RM_VQABS 14
4923 #define NEON_2RM_VQNEG 15
4924 #define NEON_2RM_VCGT0 16
4925 #define NEON_2RM_VCGE0 17
4926 #define NEON_2RM_VCEQ0 18
4927 #define NEON_2RM_VCLE0 19
4928 #define NEON_2RM_VCLT0 20
4929 #define NEON_2RM_SHA1H 21
4930 #define NEON_2RM_VABS 22
4931 #define NEON_2RM_VNEG 23
4932 #define NEON_2RM_VCGT0_F 24
4933 #define NEON_2RM_VCGE0_F 25
4934 #define NEON_2RM_VCEQ0_F 26
4935 #define NEON_2RM_VCLE0_F 27
4936 #define NEON_2RM_VCLT0_F 28
4937 #define NEON_2RM_VABS_F 30
4938 #define NEON_2RM_VNEG_F 31
4939 #define NEON_2RM_VSWP 32
4940 #define NEON_2RM_VTRN 33
4941 #define NEON_2RM_VUZP 34
4942 #define NEON_2RM_VZIP 35
4943 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4944 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4945 #define NEON_2RM_VSHLL 38
4946 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
4947 #define NEON_2RM_VRINTN 40
4948 #define NEON_2RM_VRINTX 41
4949 #define NEON_2RM_VRINTA 42
4950 #define NEON_2RM_VRINTZ 43
4951 #define NEON_2RM_VCVT_F16_F32 44
4952 #define NEON_2RM_VRINTM 45
4953 #define NEON_2RM_VCVT_F32_F16 46
4954 #define NEON_2RM_VRINTP 47
4955 #define NEON_2RM_VCVTAU 48
4956 #define NEON_2RM_VCVTAS 49
4957 #define NEON_2RM_VCVTNU 50
4958 #define NEON_2RM_VCVTNS 51
4959 #define NEON_2RM_VCVTPU 52
4960 #define NEON_2RM_VCVTPS 53
4961 #define NEON_2RM_VCVTMU 54
4962 #define NEON_2RM_VCVTMS 55
4963 #define NEON_2RM_VRECPE 56
4964 #define NEON_2RM_VRSQRTE 57
4965 #define NEON_2RM_VRECPE_F 58
4966 #define NEON_2RM_VRSQRTE_F 59
4967 #define NEON_2RM_VCVT_FS 60
4968 #define NEON_2RM_VCVT_FU 61
4969 #define NEON_2RM_VCVT_SF 62
4970 #define NEON_2RM_VCVT_UF 63
4972 static int neon_2rm_is_float_op(int op)
4974 /* Return true if this neon 2reg-misc op is float-to-float */
4975 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4976 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
4977 op == NEON_2RM_VRINTM ||
4978 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
4979 op >= NEON_2RM_VRECPE_F);
4982 /* Each entry in this array has bit n set if the insn allows
4983 * size value n (otherwise it will UNDEF). Since unallocated
4984 * op values will have no bits set they always UNDEF.
4986 static const uint8_t neon_2rm_sizes[] = {
4987 [NEON_2RM_VREV64] = 0x7,
4988 [NEON_2RM_VREV32] = 0x3,
4989 [NEON_2RM_VREV16] = 0x1,
4990 [NEON_2RM_VPADDL] = 0x7,
4991 [NEON_2RM_VPADDL_U] = 0x7,
4992 [NEON_2RM_AESE] = 0x1,
4993 [NEON_2RM_AESMC] = 0x1,
4994 [NEON_2RM_VCLS] = 0x7,
4995 [NEON_2RM_VCLZ] = 0x7,
4996 [NEON_2RM_VCNT] = 0x1,
4997 [NEON_2RM_VMVN] = 0x1,
4998 [NEON_2RM_VPADAL] = 0x7,
4999 [NEON_2RM_VPADAL_U] = 0x7,
5000 [NEON_2RM_VQABS] = 0x7,
5001 [NEON_2RM_VQNEG] = 0x7,
5002 [NEON_2RM_VCGT0] = 0x7,
5003 [NEON_2RM_VCGE0] = 0x7,
5004 [NEON_2RM_VCEQ0] = 0x7,
5005 [NEON_2RM_VCLE0] = 0x7,
5006 [NEON_2RM_VCLT0] = 0x7,
5007 [NEON_2RM_SHA1H] = 0x4,
5008 [NEON_2RM_VABS] = 0x7,
5009 [NEON_2RM_VNEG] = 0x7,
5010 [NEON_2RM_VCGT0_F] = 0x4,
5011 [NEON_2RM_VCGE0_F] = 0x4,
5012 [NEON_2RM_VCEQ0_F] = 0x4,
5013 [NEON_2RM_VCLE0_F] = 0x4,
5014 [NEON_2RM_VCLT0_F] = 0x4,
5015 [NEON_2RM_VABS_F] = 0x4,
5016 [NEON_2RM_VNEG_F] = 0x4,
5017 [NEON_2RM_VSWP] = 0x1,
5018 [NEON_2RM_VTRN] = 0x7,
5019 [NEON_2RM_VUZP] = 0x7,
5020 [NEON_2RM_VZIP] = 0x7,
5021 [NEON_2RM_VMOVN] = 0x7,
5022 [NEON_2RM_VQMOVN] = 0x7,
5023 [NEON_2RM_VSHLL] = 0x7,
5024 [NEON_2RM_SHA1SU1] = 0x4,
5025 [NEON_2RM_VRINTN] = 0x4,
5026 [NEON_2RM_VRINTX] = 0x4,
5027 [NEON_2RM_VRINTA] = 0x4,
5028 [NEON_2RM_VRINTZ] = 0x4,
5029 [NEON_2RM_VCVT_F16_F32] = 0x2,
5030 [NEON_2RM_VRINTM] = 0x4,
5031 [NEON_2RM_VCVT_F32_F16] = 0x2,
5032 [NEON_2RM_VRINTP] = 0x4,
5033 [NEON_2RM_VCVTAU] = 0x4,
5034 [NEON_2RM_VCVTAS] = 0x4,
5035 [NEON_2RM_VCVTNU] = 0x4,
5036 [NEON_2RM_VCVTNS] = 0x4,
5037 [NEON_2RM_VCVTPU] = 0x4,
5038 [NEON_2RM_VCVTPS] = 0x4,
5039 [NEON_2RM_VCVTMU] = 0x4,
5040 [NEON_2RM_VCVTMS] = 0x4,
5041 [NEON_2RM_VRECPE] = 0x4,
5042 [NEON_2RM_VRSQRTE] = 0x4,
5043 [NEON_2RM_VRECPE_F] = 0x4,
5044 [NEON_2RM_VRSQRTE_F] = 0x4,
5045 [NEON_2RM_VCVT_FS] = 0x4,
5046 [NEON_2RM_VCVT_FU] = 0x4,
5047 [NEON_2RM_VCVT_SF] = 0x4,
5048 [NEON_2RM_VCVT_UF] = 0x4,
5051 /* Translate a NEON data processing instruction. Return nonzero if the
5052 instruction is invalid.
5053 We process data in a mixture of 32-bit and 64-bit chunks.
5054 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5056 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5068 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5071 /* FIXME: this access check should not take precedence over UNDEF
5072 * for invalid encodings; we will generate incorrect syndrome information
5073 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5075 if (!s->cpacr_fpen) {
5076 gen_exception_insn(s, 4, EXCP_UDEF,
5077 syn_fp_access_trap(1, 0xe, s->thumb));
5081 if (!s->vfp_enabled)
5083 q = (insn & (1 << 6)) != 0;
5084 u = (insn >> 24) & 1;
5085 VFP_DREG_D(rd, insn);
5086 VFP_DREG_N(rn, insn);
5087 VFP_DREG_M(rm, insn);
5088 size = (insn >> 20) & 3;
5089 if ((insn & (1 << 23)) == 0) {
5090 /* Three register same length. */
5091 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5092 /* Catch invalid op and bad size combinations: UNDEF */
5093 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5096 /* All insns of this form UNDEF for either this condition or the
5097 * superset of cases "Q==1"; we catch the latter later.
5099 if (q && ((rd | rn | rm) & 1)) {
5103 * The SHA-1/SHA-256 3-register instructions require special treatment
5104 * here, as their size field is overloaded as an op type selector, and
5105 * they all consume their input in a single pass.
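 * For SHA-1 (U == 0) the size field (0..3) selects SHA1C, SHA1P, SHA1M or
 * SHA1SU0 and is passed straight through to the helper; for SHA-256
 * (U == 1) sizes 0, 1 and 2 select SHA256H, SHA256H2 and SHA256SU1, and
 * size == 3 UNDEFs.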
5107 if (op == NEON_3R_SHA) {
5111 if (!u) { /* SHA-1 */
5112 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5115 tmp = tcg_const_i32(rd);
5116 tmp2 = tcg_const_i32(rn);
5117 tmp3 = tcg_const_i32(rm);
5118 tmp4 = tcg_const_i32(size);
5119 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5120 tcg_temp_free_i32(tmp4);
5121 } else { /* SHA-256 */
5122 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5125 tmp = tcg_const_i32(rd);
5126 tmp2 = tcg_const_i32(rn);
5127 tmp3 = tcg_const_i32(rm);
5130 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5133 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5136 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5140 tcg_temp_free_i32(tmp);
5141 tcg_temp_free_i32(tmp2);
5142 tcg_temp_free_i32(tmp3);
5145 if (size == 3 && op != NEON_3R_LOGIC) {
5146 /* 64-bit element instructions. */
5147 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5148 neon_load_reg64(cpu_V0, rn + pass);
5149 neon_load_reg64(cpu_V1, rm + pass);
5153 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5156 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5162 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5165 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5171 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5173 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5178 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5181 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5187 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5189 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5192 case NEON_3R_VQRSHL:
5194 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5197 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5201 case NEON_3R_VADD_VSUB:
5203 tcg_gen_sub_i64(CPU_V001);
5205 tcg_gen_add_i64(CPU_V001);
5211 neon_store_reg64(cpu_V0, rd + pass);
5220 case NEON_3R_VQRSHL:
5223 /* Shift instruction operands are reversed. */
5238 case NEON_3R_FLOAT_ARITH:
5239 pairwise = (u && size < 2); /* if VPADD (float) */
5241 case NEON_3R_FLOAT_MINMAX:
5242 pairwise = u; /* if VPMIN/VPMAX (float) */
5244 case NEON_3R_FLOAT_CMP:
5246 /* no encoding for U=0 C=1x */
5250 case NEON_3R_FLOAT_ACMP:
5255 case NEON_3R_FLOAT_MISC:
5256 /* VMAXNM/VMINNM in ARMv8 */
5257 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5262 if (u && (size != 0)) {
5263 /* UNDEF on invalid size for polynomial subcase */
5268 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5276 if (pairwise && q) {
5277 /* All the pairwise insns UNDEF if Q is set */
5281 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5286 tmp = neon_load_reg(rn, 0);
5287 tmp2 = neon_load_reg(rn, 1);
5289 tmp = neon_load_reg(rm, 0);
5290 tmp2 = neon_load_reg(rm, 1);
5294 tmp = neon_load_reg(rn, pass);
5295 tmp2 = neon_load_reg(rm, pass);
5299 GEN_NEON_INTEGER_OP(hadd);
5302 GEN_NEON_INTEGER_OP_ENV(qadd);
5304 case NEON_3R_VRHADD:
5305 GEN_NEON_INTEGER_OP(rhadd);
5307 case NEON_3R_LOGIC: /* Logic ops. */
5308 switch ((u << 2) | size) {
5310 tcg_gen_and_i32(tmp, tmp, tmp2);
5313 tcg_gen_andc_i32(tmp, tmp, tmp2);
5316 tcg_gen_or_i32(tmp, tmp, tmp2);
5319 tcg_gen_orc_i32(tmp, tmp, tmp2);
5322 tcg_gen_xor_i32(tmp, tmp, tmp2);
5325 tmp3 = neon_load_reg(rd, pass);
5326 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5327 tcg_temp_free_i32(tmp3);
5330 tmp3 = neon_load_reg(rd, pass);
5331 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5332 tcg_temp_free_i32(tmp3);
5335 tmp3 = neon_load_reg(rd, pass);
5336 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5337 tcg_temp_free_i32(tmp3);
5342 GEN_NEON_INTEGER_OP(hsub);
5345 GEN_NEON_INTEGER_OP_ENV(qsub);
5348 GEN_NEON_INTEGER_OP(cgt);
5351 GEN_NEON_INTEGER_OP(cge);
5354 GEN_NEON_INTEGER_OP(shl);
5357 GEN_NEON_INTEGER_OP_ENV(qshl);
5360 GEN_NEON_INTEGER_OP(rshl);
5362 case NEON_3R_VQRSHL:
5363 GEN_NEON_INTEGER_OP_ENV(qrshl);
5366 GEN_NEON_INTEGER_OP(max);
5369 GEN_NEON_INTEGER_OP(min);
5372 GEN_NEON_INTEGER_OP(abd);
5375 GEN_NEON_INTEGER_OP(abd);
5376 tcg_temp_free_i32(tmp2);
5377 tmp2 = neon_load_reg(rd, pass);
5378 gen_neon_add(size, tmp, tmp2);
5380 case NEON_3R_VADD_VSUB:
5381 if (!u) { /* VADD */
5382 gen_neon_add(size, tmp, tmp2);
5385 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5386 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5387 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5392 case NEON_3R_VTST_VCEQ:
5393 if (!u) { /* VTST */
5395 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5396 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5397 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5402 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5403 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5404 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5409 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
5411 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5412 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5413 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5416 tcg_temp_free_i32(tmp2);
5417 tmp2 = neon_load_reg(rd, pass);
5419 gen_neon_rsb(size, tmp, tmp2);
5421 gen_neon_add(size, tmp, tmp2);
5425 if (u) { /* polynomial */
5426 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5427 } else { /* Integer */
5429 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5430 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5431 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5437 GEN_NEON_INTEGER_OP(pmax);
5440 GEN_NEON_INTEGER_OP(pmin);
5442 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5443 if (!u) { /* VQDMULH */
5446 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5449 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5453 } else { /* VQRDMULH */
5456 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5459 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5467 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5468 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5469 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5473 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5475 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5476 switch ((u << 2) | size) {
5479 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5482 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5485 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5490 tcg_temp_free_ptr(fpstatus);
5493 case NEON_3R_FLOAT_MULTIPLY:
5495 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5496 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5498 tcg_temp_free_i32(tmp2);
5499 tmp2 = neon_load_reg(rd, pass);
5501 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5503 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5506 tcg_temp_free_ptr(fpstatus);
5509 case NEON_3R_FLOAT_CMP:
5511 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5513 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5516 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5518 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5521 tcg_temp_free_ptr(fpstatus);
5524 case NEON_3R_FLOAT_ACMP:
5526 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5528 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5530 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5532 tcg_temp_free_ptr(fpstatus);
5535 case NEON_3R_FLOAT_MINMAX:
5537 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5539 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5541 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5543 tcg_temp_free_ptr(fpstatus);
5546 case NEON_3R_FLOAT_MISC:
5549 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5551 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5553 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5555 tcg_temp_free_ptr(fpstatus);
5558 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5560 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5566 /* VFMA, VFMS: fused multiply-add */
5567 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5568 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5571 gen_helper_vfp_negs(tmp, tmp);
5573 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5574 tcg_temp_free_i32(tmp3);
5575 tcg_temp_free_ptr(fpstatus);
5581 tcg_temp_free_i32(tmp2);
5583 /* Save the result. For elementwise operations we can put it
5584 straight into the destination register. For pairwise operations
5585 we have to be careful to avoid clobbering the source operands. */
5586 if (pairwise && rd == rm) {
5587 neon_store_scratch(pass, tmp);
5589 neon_store_reg(rd, pass, tmp);
5593 if (pairwise && rd == rm) {
5594 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5595 tmp = neon_load_scratch(pass);
5596 neon_store_reg(rd, pass, tmp);
5599 /* End of 3 register same size operations. */
5600 } else if (insn & (1 << 4)) {
5601 if ((insn & 0x00380080) != 0) {
5602 /* Two registers and shift. */
5603 op = (insn >> 8) & 0xf;
5604 if (insn & (1 << 7)) {
5612 while ((insn & (1 << (size + 19))) == 0)
5615 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5616 /* To avoid excessive duplication of ops we implement shift
5617 by immediate using the variable shift operations. */
5619 /* Shift by immediate:
5620 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5621 if (q && ((rd | rm) & 1)) {
5624 if (!u && (op == 4 || op == 6)) {
5627 /* Right shifts are encoded as N - shift, where N is the
5628 element size in bits. */
5630 shift = shift - (1 << (size + 3));
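/* For example, an 8-bit VSHR by 3 is encoded with shift field 5 (8 - 3),
 * which becomes shift = -3 here; the variable shift helpers treat a
 * negative count as a right shift by that amount. */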
5638 imm = (uint8_t) shift;
5643 imm = (uint16_t) shift;
5654 for (pass = 0; pass < count; pass++) {
5656 neon_load_reg64(cpu_V0, rm + pass);
5657 tcg_gen_movi_i64(cpu_V1, imm);
5662 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5664 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5669 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5671 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5674 case 5: /* VSHL, VSLI */
5675 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5677 case 6: /* VQSHLU */
5678 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5683 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5686 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5691 if (op == 1 || op == 3) {
5693 neon_load_reg64(cpu_V1, rd + pass);
5694 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5695 } else if (op == 4 || (op == 5 && u)) {
5697 neon_load_reg64(cpu_V1, rd + pass);
5699 if (shift < -63 || shift > 63) {
5703 mask = 0xffffffffffffffffull >> -shift;
5705 mask = 0xffffffffffffffffull << shift;
5708 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5709 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5711 neon_store_reg64(cpu_V0, rd + pass);
5712 } else { /* size < 3 */
5713 /* Operands in T0 and T1. */
5714 tmp = neon_load_reg(rm, pass);
5715 tmp2 = tcg_temp_new_i32();
5716 tcg_gen_movi_i32(tmp2, imm);
5720 GEN_NEON_INTEGER_OP(shl);
5724 GEN_NEON_INTEGER_OP(rshl);
5727 case 5: /* VSHL, VSLI */
5729 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5730 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5731 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5735 case 6: /* VQSHLU */
5738 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5742 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5746 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5754 GEN_NEON_INTEGER_OP_ENV(qshl);
5757 tcg_temp_free_i32(tmp2);
5759 if (op == 1 || op == 3) {
5761 tmp2 = neon_load_reg(rd, pass);
5762 gen_neon_add(size, tmp, tmp2);
5763 tcg_temp_free_i32(tmp2);
5764 } else if (op == 4 || (op == 5 && u)) {
5769 mask = 0xff >> -shift;
5771 mask = (uint8_t)(0xff << shift);
5777 mask = 0xffff >> -shift;
5779 mask = (uint16_t)(0xffff << shift);
5783 if (shift < -31 || shift > 31) {
5787 mask = 0xffffffffu >> -shift;
5789 mask = 0xffffffffu << shift;
5795 tmp2 = neon_load_reg(rd, pass);
5796 tcg_gen_andi_i32(tmp, tmp, mask);
5797 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5798 tcg_gen_or_i32(tmp, tmp, tmp2);
5799 tcg_temp_free_i32(tmp2);
5801 neon_store_reg(rd, pass, tmp);
5804 } else if (op < 10) {
5805 /* Shift by immediate and narrow:
5806 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5807 int input_unsigned = (op == 8) ? !u : u;
5811 shift = shift - (1 << (size + 3));
5814 tmp64 = tcg_const_i64(shift);
5815 neon_load_reg64(cpu_V0, rm);
5816 neon_load_reg64(cpu_V1, rm + 1);
5817 for (pass = 0; pass < 2; pass++) {
5825 if (input_unsigned) {
5826 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5828 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5831 if (input_unsigned) {
5832 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5834 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5837 tmp = tcg_temp_new_i32();
5838 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5839 neon_store_reg(rd, pass, tmp);
5841 tcg_temp_free_i64(tmp64);
5844 imm = (uint16_t)shift;
5848 imm = (uint32_t)shift;
5850 tmp2 = tcg_const_i32(imm);
5851 tmp4 = neon_load_reg(rm + 1, 0);
5852 tmp5 = neon_load_reg(rm + 1, 1);
5853 for (pass = 0; pass < 2; pass++) {
5855 tmp = neon_load_reg(rm, 0);
5859 gen_neon_shift_narrow(size, tmp, tmp2, q,
5862 tmp3 = neon_load_reg(rm, 1);
5866 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5868 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5869 tcg_temp_free_i32(tmp);
5870 tcg_temp_free_i32(tmp3);
5871 tmp = tcg_temp_new_i32();
5872 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5873 neon_store_reg(rd, pass, tmp);
5875 tcg_temp_free_i32(tmp2);
5877 } else if (op == 10) {
5879 if (q || (rd & 1)) {
5882 tmp = neon_load_reg(rm, 0);
5883 tmp2 = neon_load_reg(rm, 1);
5884 for (pass = 0; pass < 2; pass++) {
5888 gen_neon_widen(cpu_V0, tmp, size, u);
5891 /* The shift is less than the width of the source
5892 type, so we can just shift the whole register. */
5893 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5894 /* Widen the result of shift: we need to clear
5895 * the potential overflow bits resulting from
5896 * left bits of the narrow input appearing as
5897 * right bits of the left neighbour narrow input.
5899 if (size < 2 || !u) {
5902 imm = (0xffu >> (8 - shift));
5904 } else if (size == 1) {
5905 imm = 0xffff >> (16 - shift);
5908 imm = 0xffffffff >> (32 - shift);
5911 imm64 = imm | (((uint64_t)imm) << 32);
5915 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5918 neon_store_reg64(cpu_V0, rd + pass);
5920 } else if (op >= 14) {
5921 /* VCVT fixed-point. */
5922 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5925 /* We have already masked out the must-be-1 top bit of imm6,
5926 * hence this 32-shift where the ARM ARM has 64-imm6.
5929 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5930 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5933 gen_vfp_ulto(0, shift, 1);
5935 gen_vfp_slto(0, shift, 1);
5938 gen_vfp_toul(0, shift, 1);
5940 gen_vfp_tosl(0, shift, 1);
5942 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5947 } else { /* (insn & 0x00380080) == 0 */
5949 if (q && (rd & 1)) {
5953 op = (insn >> 8) & 0xf;
5954 /* One register and immediate. */
5955 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5956 invert = (insn & (1 << 5)) != 0;
5957 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5958 * We choose not to special-case this and will behave as if a
5959 * valid constant encoding of 0 had been given.
5978 imm = (imm << 8) | (imm << 24);
5981 imm = (imm << 8) | 0xff;
5984 imm = (imm << 16) | 0xffff;
5987 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5995 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5996 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6002 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6003 if (op & 1 && op < 12) {
6004 tmp = neon_load_reg(rd, pass);
6006 /* The immediate value has already been inverted, so BIC becomes AND. */
6008 tcg_gen_andi_i32(tmp, tmp, imm);
6010 tcg_gen_ori_i32(tmp, tmp, imm);
6014 tmp = tcg_temp_new_i32();
6015 if (op == 14 && invert) {
6019 for (n = 0; n < 4; n++) {
6020 if (imm & (1 << (n + (pass & 1) * 4)))
6021 val |= 0xff << (n * 8);
6023 tcg_gen_movi_i32(tmp, val);
6025 tcg_gen_movi_i32(tmp, imm);
6028 neon_store_reg(rd, pass, tmp);
6031 } else { /* (insn & 0x00800010) == 0x00800000 */
6033 op = (insn >> 8) & 0xf;
6034 if ((insn & (1 << 6)) == 0) {
6035 /* Three registers of different lengths. */
6039 /* undefreq: bit 0 : UNDEF if size == 0
6040 * bit 1 : UNDEF if size == 1
6041 * bit 2 : UNDEF if size == 2
6042 * bit 3 : UNDEF if U == 1
6043 * Note that [2:0] set implies 'always UNDEF'
6046 /* prewiden, src1_wide, src2_wide, undefreq */
6047 static const int neon_3reg_wide[16][4] = {
6048 {1, 0, 0, 0}, /* VADDL */
6049 {1, 1, 0, 0}, /* VADDW */
6050 {1, 0, 0, 0}, /* VSUBL */
6051 {1, 1, 0, 0}, /* VSUBW */
6052 {0, 1, 1, 0}, /* VADDHN */
6053 {0, 0, 0, 0}, /* VABAL */
6054 {0, 1, 1, 0}, /* VSUBHN */
6055 {0, 0, 0, 0}, /* VABDL */
6056 {0, 0, 0, 0}, /* VMLAL */
6057 {0, 0, 0, 9}, /* VQDMLAL */
6058 {0, 0, 0, 0}, /* VMLSL */
6059 {0, 0, 0, 9}, /* VQDMLSL */
6060 {0, 0, 0, 0}, /* Integer VMULL */
6061 {0, 0, 0, 1}, /* VQDMULL */
6062 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6063 {0, 0, 0, 7}, /* Reserved: always UNDEF */
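/* e.g. undefreq 9 (bits 0 and 3) UNDEFs for size == 0 or U == 1, leaving
 * only the signed 16- and 32-bit VQDMLAL/VQDMLSL forms; 0xa allows only
 * sizes 0 and 2 with U == 0, i.e. VMULL.P8 and VMULL.P64. */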
6066 prewiden = neon_3reg_wide[op][0];
6067 src1_wide = neon_3reg_wide[op][1];
6068 src2_wide = neon_3reg_wide[op][2];
6069 undefreq = neon_3reg_wide[op][3];
6071 if ((undefreq & (1 << size)) ||
6072 ((undefreq & 8) && u)) {
6075 if ((src1_wide && (rn & 1)) ||
6076 (src2_wide && (rm & 1)) ||
6077 (!src2_wide && (rd & 1))) {
6081 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6082 * outside the loop below as it only performs a single pass.
6084 if (op == 14 && size == 2) {
6085 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6087 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6090 tcg_rn = tcg_temp_new_i64();
6091 tcg_rm = tcg_temp_new_i64();
6092 tcg_rd = tcg_temp_new_i64();
6093 neon_load_reg64(tcg_rn, rn);
6094 neon_load_reg64(tcg_rm, rm);
6095 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6096 neon_store_reg64(tcg_rd, rd);
6097 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6098 neon_store_reg64(tcg_rd, rd + 1);
6099 tcg_temp_free_i64(tcg_rn);
6100 tcg_temp_free_i64(tcg_rm);
6101 tcg_temp_free_i64(tcg_rd);
6105 /* Avoid overlapping operands. Wide source operands are
6106 always aligned so will never overlap with wide
6107 destinations in problematic ways. */
6108 if (rd == rm && !src2_wide) {
6109 tmp = neon_load_reg(rm, 1);
6110 neon_store_scratch(2, tmp);
6111 } else if (rd == rn && !src1_wide) {
6112 tmp = neon_load_reg(rn, 1);
6113 neon_store_scratch(2, tmp);
6115 TCGV_UNUSED_I32(tmp3);
6116 for (pass = 0; pass < 2; pass++) {
6118 neon_load_reg64(cpu_V0, rn + pass);
6119 TCGV_UNUSED_I32(tmp);
6121 if (pass == 1 && rd == rn) {
6122 tmp = neon_load_scratch(2);
6124 tmp = neon_load_reg(rn, pass);
6127 gen_neon_widen(cpu_V0, tmp, size, u);
6131 neon_load_reg64(cpu_V1, rm + pass);
6132 TCGV_UNUSED_I32(tmp2);
6134 if (pass == 1 && rd == rm) {
6135 tmp2 = neon_load_scratch(2);
6137 tmp2 = neon_load_reg(rm, pass);
6140 gen_neon_widen(cpu_V1, tmp2, size, u);
6144 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6145 gen_neon_addl(size);
6147 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6148 gen_neon_subl(size);
6150 case 5: case 7: /* VABAL, VABDL */
6151 switch ((size << 1) | u) {
6153 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6156 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6159 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6162 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6165 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6168 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6172 tcg_temp_free_i32(tmp2);
6173 tcg_temp_free_i32(tmp);
6175 case 8: case 9: case 10: case 11: case 12: case 13:
6176 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6177 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6179 case 14: /* Polynomial VMULL */
6180 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6181 tcg_temp_free_i32(tmp2);
6182 tcg_temp_free_i32(tmp);
6184 default: /* 15 is RESERVED: caught earlier */
6189 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6190 neon_store_reg64(cpu_V0, rd + pass);
6191 } else if (op == 5 || (op >= 8 && op <= 11)) {
6193 neon_load_reg64(cpu_V1, rd + pass);
6195 case 10: /* VMLSL */
6196 gen_neon_negl(cpu_V0, size);
6198 case 5: case 8: /* VABAL, VMLAL */
6199 gen_neon_addl(size);
6201 case 9: case 11: /* VQDMLAL, VQDMLSL */
6202 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6204 gen_neon_negl(cpu_V0, size);
6206 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6211 neon_store_reg64(cpu_V0, rd + pass);
6212 } else if (op == 4 || op == 6) {
6213 /* Narrowing operation. */
6214 tmp = tcg_temp_new_i32();
6218 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6221 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6224 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6225 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6232 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6235 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6238 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6239 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6240 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6248 neon_store_reg(rd, 0, tmp3);
6249 neon_store_reg(rd, 1, tmp);
6252 /* Write back the result. */
6253 neon_store_reg64(cpu_V0, rd + pass);
6257 /* Two registers and a scalar. NB that for ops of this form
6258 * the ARM ARM labels bit 24 as Q, but it is in our variable 'u', not 'q'.
6265 case 1: /* Floating point VMLA scalar */
6266 case 5: /* Floating point VMLS scalar */
6267 case 9: /* Floating point VMUL scalar */
6272 case 0: /* Integer VMLA scalar */
6273 case 4: /* Integer VMLS scalar */
6274 case 8: /* Integer VMUL scalar */
6275 case 12: /* VQDMULH scalar */
6276 case 13: /* VQRDMULH scalar */
6277 if (u && ((rd | rn) & 1)) {
6280 tmp = neon_get_scalar(size, rm);
6281 neon_store_scratch(0, tmp);
6282 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6283 tmp = neon_load_scratch(0);
6284 tmp2 = neon_load_reg(rn, pass);
6287 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6289 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6291 } else if (op == 13) {
6293 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6295 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6297 } else if (op & 1) {
6298 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6299 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6300 tcg_temp_free_ptr(fpstatus);
6303 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6304 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6305 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6309 tcg_temp_free_i32(tmp2);
6312 tmp2 = neon_load_reg(rd, pass);
6315 gen_neon_add(size, tmp, tmp2);
6319 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6320 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6321 tcg_temp_free_ptr(fpstatus);
6325 gen_neon_rsb(size, tmp, tmp2);
6329 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6330 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6331 tcg_temp_free_ptr(fpstatus);
6337 tcg_temp_free_i32(tmp2);
6339 neon_store_reg(rd, pass, tmp);
6342 case 3: /* VQDMLAL scalar */
6343 case 7: /* VQDMLSL scalar */
6344 case 11: /* VQDMULL scalar */
6349 case 2: /* VMLAL scalar */
6350 case 6: /* VMLSL scalar */
6351 case 10: /* VMULL scalar */
6355 tmp2 = neon_get_scalar(size, rm);
6356 /* We need a copy of tmp2 because gen_neon_mull
6357 * deletes it during pass 0. */
6358 tmp4 = tcg_temp_new_i32();
6359 tcg_gen_mov_i32(tmp4, tmp2);
6360 tmp3 = neon_load_reg(rn, 1);
6362 for (pass = 0; pass < 2; pass++) {
6364 tmp = neon_load_reg(rn, 0);
6369 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6371 neon_load_reg64(cpu_V1, rd + pass);
6375 gen_neon_negl(cpu_V0, size);
6378 gen_neon_addl(size);
6381 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6383 gen_neon_negl(cpu_V0, size);
6385 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6391 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6396 neon_store_reg64(cpu_V0, rd + pass);
6401 default: /* 14 and 15 are RESERVED */
6405 } else { /* size == 3 */
6408 imm = (insn >> 8) & 0xf;
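/* VEXT: byte-wise extract. The result is the concatenation <Vm:Vn>
 * shifted right by 'imm' bytes, handled below in one or two 64-bit
 * chunks depending on Q. */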
6413 if (q && ((rd | rn | rm) & 1)) {
6418 neon_load_reg64(cpu_V0, rn);
6420 neon_load_reg64(cpu_V1, rn + 1);
6422 } else if (imm == 8) {
6423 neon_load_reg64(cpu_V0, rn + 1);
6425 neon_load_reg64(cpu_V1, rm);
6428 tmp64 = tcg_temp_new_i64();
6430 neon_load_reg64(cpu_V0, rn);
6431 neon_load_reg64(tmp64, rn + 1);
6433 neon_load_reg64(cpu_V0, rn + 1);
6434 neon_load_reg64(tmp64, rm);
6436 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6437 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6438 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6440 neon_load_reg64(cpu_V1, rm);
6442 neon_load_reg64(cpu_V1, rm + 1);
6445 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6446 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6447 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6448 tcg_temp_free_i64(tmp64);
6451 neon_load_reg64(cpu_V0, rn);
6452 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6453 neon_load_reg64(cpu_V1, rm);
6454 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6455 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6457 neon_store_reg64(cpu_V0, rd);
6459 neon_store_reg64(cpu_V1, rd + 1);
6461 } else if ((insn & (1 << 11)) == 0) {
6462 /* Two register misc. */
6463 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6464 size = (insn >> 18) & 3;
6465 /* UNDEF for unknown op values and bad op-size combinations */
6466 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6469 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6470 q && ((rm | rd) & 1)) {
6474 case NEON_2RM_VREV64:
6475 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6476 tmp = neon_load_reg(rm, pass * 2);
6477 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6479 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6480 case 1: gen_swap_half(tmp); break;
6481 case 2: /* no-op */ break;
6484 neon_store_reg(rd, pass * 2 + 1, tmp);
6486 neon_store_reg(rd, pass * 2, tmp2);
6489 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6490 case 1: gen_swap_half(tmp2); break;
6493 neon_store_reg(rd, pass * 2, tmp2);
6497 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6498 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6499 for (pass = 0; pass < q + 1; pass++) {
6500 tmp = neon_load_reg(rm, pass * 2);
6501 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6502 tmp = neon_load_reg(rm, pass * 2 + 1);
6503 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6505 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6506 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6507 case 2: tcg_gen_add_i64(CPU_V001); break;
6510 if (op >= NEON_2RM_VPADAL) {
6512 neon_load_reg64(cpu_V1, rd + pass);
6513 gen_neon_addl(size);
6515 neon_store_reg64(cpu_V0, rd + pass);
6521 for (n = 0; n < (q ? 4 : 2); n += 2) {
6522 tmp = neon_load_reg(rm, n);
6523 tmp2 = neon_load_reg(rd, n + 1);
6524 neon_store_reg(rm, n, tmp2);
6525 neon_store_reg(rd, n + 1, tmp);
6532 if (gen_neon_unzip(rd, rm, size, q)) {
6537 if (gen_neon_zip(rd, rm, size, q)) {
6541 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6542 /* also VQMOVUN; op field and mnemonics don't line up */
6546 TCGV_UNUSED_I32(tmp2);
6547 for (pass = 0; pass < 2; pass++) {
6548 neon_load_reg64(cpu_V0, rm + pass);
6549 tmp = tcg_temp_new_i32();
6550 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6555 neon_store_reg(rd, 0, tmp2);
6556 neon_store_reg(rd, 1, tmp);
6560 case NEON_2RM_VSHLL:
6561 if (q || (rd & 1)) {
6564 tmp = neon_load_reg(rm, 0);
6565 tmp2 = neon_load_reg(rm, 1);
6566 for (pass = 0; pass < 2; pass++) {
6569 gen_neon_widen(cpu_V0, tmp, size, 1);
6570 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6571 neon_store_reg64(cpu_V0, rd + pass);
6574 case NEON_2RM_VCVT_F16_F32:
6575 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6579 tmp = tcg_temp_new_i32();
6580 tmp2 = tcg_temp_new_i32();
6581 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6582 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6583 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6584 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6585 tcg_gen_shli_i32(tmp2, tmp2, 16);
6586 tcg_gen_or_i32(tmp2, tmp2, tmp);
6587 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6588 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6589 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6590 neon_store_reg(rd, 0, tmp2);
6591 tmp2 = tcg_temp_new_i32();
6592 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6593 tcg_gen_shli_i32(tmp2, tmp2, 16);
6594 tcg_gen_or_i32(tmp2, tmp2, tmp);
6595 neon_store_reg(rd, 1, tmp2);
6596 tcg_temp_free_i32(tmp);
6598 case NEON_2RM_VCVT_F32_F16:
6599 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6603 tmp3 = tcg_temp_new_i32();
6604 tmp = neon_load_reg(rm, 0);
6605 tmp2 = neon_load_reg(rm, 1);
6606 tcg_gen_ext16u_i32(tmp3, tmp);
6607 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6608 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6609 tcg_gen_shri_i32(tmp3, tmp, 16);
6610 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6611 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6612 tcg_temp_free_i32(tmp);
6613 tcg_gen_ext16u_i32(tmp3, tmp2);
6614 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6615 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6616 tcg_gen_shri_i32(tmp3, tmp2, 16);
6617 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6618 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6619 tcg_temp_free_i32(tmp2);
6620 tcg_temp_free_i32(tmp3);
6622 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6623 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
6624 || ((rm | rd) & 1)) {
6627 tmp = tcg_const_i32(rd);
6628 tmp2 = tcg_const_i32(rm);
6630 /* Bit 6 is the lowest opcode bit; it distinguishes between
6631 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6633 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6635 if (op == NEON_2RM_AESE) {
6636 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6638 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6640 tcg_temp_free_i32(tmp);
6641 tcg_temp_free_i32(tmp2);
6642 tcg_temp_free_i32(tmp3);
6644 case NEON_2RM_SHA1H:
6645 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
6646 || ((rm | rd) & 1)) {
6649 tmp = tcg_const_i32(rd);
6650 tmp2 = tcg_const_i32(rm);
6652 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6654 tcg_temp_free_i32(tmp);
6655 tcg_temp_free_i32(tmp2);
6657 case NEON_2RM_SHA1SU1:
6658 if ((rm | rd) & 1) {
6661 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6663 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
6666 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
6669 tmp = tcg_const_i32(rd);
6670 tmp2 = tcg_const_i32(rm);
6672 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6674 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6676 tcg_temp_free_i32(tmp);
6677 tcg_temp_free_i32(tmp2);
6681 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6682 if (neon_2rm_is_float_op(op)) {
6683 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6684 neon_reg_offset(rm, pass));
6685 TCGV_UNUSED_I32(tmp);
6687 tmp = neon_load_reg(rm, pass);
6690 case NEON_2RM_VREV32:
6692 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6693 case 1: gen_swap_half(tmp); break;
6697 case NEON_2RM_VREV16:
6702 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6703 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6704 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6710 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6711 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6712 case 2: gen_helper_clz(tmp, tmp); break;
6717 gen_helper_neon_cnt_u8(tmp, tmp);
6720 tcg_gen_not_i32(tmp, tmp);
6722 case NEON_2RM_VQABS:
6725 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6728 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6731 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6736 case NEON_2RM_VQNEG:
6739 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6742 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6745 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6750 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6751 tmp2 = tcg_const_i32(0);
6753 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6754 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6755 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6758 tcg_temp_free_i32(tmp2);
6759 if (op == NEON_2RM_VCLE0) {
6760 tcg_gen_not_i32(tmp, tmp);
6763 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6764 tmp2 = tcg_const_i32(0);
6766 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6767 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6768 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6771 tcg_temp_free_i32(tmp2);
6772 if (op == NEON_2RM_VCLT0) {
6773 tcg_gen_not_i32(tmp, tmp);
6776 case NEON_2RM_VCEQ0:
6777 tmp2 = tcg_const_i32(0);
6779 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6780 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6781 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6784 tcg_temp_free_i32(tmp2);
6788 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6789 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6790 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6795 tmp2 = tcg_const_i32(0);
6796 gen_neon_rsb(size, tmp, tmp2);
6797 tcg_temp_free_i32(tmp2);
6799 case NEON_2RM_VCGT0_F:
6801 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6802 tmp2 = tcg_const_i32(0);
6803 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6804 tcg_temp_free_i32(tmp2);
6805 tcg_temp_free_ptr(fpstatus);
6808 case NEON_2RM_VCGE0_F:
6810 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6811 tmp2 = tcg_const_i32(0);
6812 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6813 tcg_temp_free_i32(tmp2);
6814 tcg_temp_free_ptr(fpstatus);
6817 case NEON_2RM_VCEQ0_F:
6819 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6820 tmp2 = tcg_const_i32(0);
6821 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6822 tcg_temp_free_i32(tmp2);
6823 tcg_temp_free_ptr(fpstatus);
6826 case NEON_2RM_VCLE0_F:
6828 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6829 tmp2 = tcg_const_i32(0);
6830 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6831 tcg_temp_free_i32(tmp2);
6832 tcg_temp_free_ptr(fpstatus);
6835 case NEON_2RM_VCLT0_F:
6837 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6838 tmp2 = tcg_const_i32(0);
6839 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6840 tcg_temp_free_i32(tmp2);
6841 tcg_temp_free_ptr(fpstatus);
6844 case NEON_2RM_VABS_F:
6847 case NEON_2RM_VNEG_F:
6851 tmp2 = neon_load_reg(rd, pass);
6852 neon_store_reg(rm, pass, tmp2);
6855 tmp2 = neon_load_reg(rd, pass);
6857 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6858 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6861 neon_store_reg(rm, pass, tmp2);
6863 case NEON_2RM_VRINTN:
6864 case NEON_2RM_VRINTA:
6865 case NEON_2RM_VRINTM:
6866 case NEON_2RM_VRINTP:
6867 case NEON_2RM_VRINTZ:
6870 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6873 if (op == NEON_2RM_VRINTZ) {
6874 rmode = FPROUNDING_ZERO;
6876 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6879 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6880 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6882 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6883 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6885 tcg_temp_free_ptr(fpstatus);
6886 tcg_temp_free_i32(tcg_rmode);
6889 case NEON_2RM_VRINTX:
6891 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6892 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6893 tcg_temp_free_ptr(fpstatus);
6896 case NEON_2RM_VCVTAU:
6897 case NEON_2RM_VCVTAS:
6898 case NEON_2RM_VCVTNU:
6899 case NEON_2RM_VCVTNS:
6900 case NEON_2RM_VCVTPU:
6901 case NEON_2RM_VCVTPS:
6902 case NEON_2RM_VCVTMU:
6903 case NEON_2RM_VCVTMS:
6905 bool is_signed = !extract32(insn, 7, 1);
6906 TCGv_ptr fpst = get_fpstatus_ptr(1);
6907 TCGv_i32 tcg_rmode, tcg_shift;
6908 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6910 tcg_shift = tcg_const_i32(0);
6911 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6912 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6916 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6919 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6923 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6925 tcg_temp_free_i32(tcg_rmode);
6926 tcg_temp_free_i32(tcg_shift);
6927 tcg_temp_free_ptr(fpst);
6930 case NEON_2RM_VRECPE:
6932 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6933 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6934 tcg_temp_free_ptr(fpstatus);
6937 case NEON_2RM_VRSQRTE:
6939 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6940 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6941 tcg_temp_free_ptr(fpstatus);
6944 case NEON_2RM_VRECPE_F:
6946 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6947 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6948 tcg_temp_free_ptr(fpstatus);
6951 case NEON_2RM_VRSQRTE_F:
6953 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6954 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6955 tcg_temp_free_ptr(fpstatus);
6958 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6961 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6964 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6965 gen_vfp_tosiz(0, 1);
6967 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6968 gen_vfp_touiz(0, 1);
6971 /* Reserved op values were caught by the
6972 * neon_2rm_sizes[] check earlier.
6976 if (neon_2rm_is_float_op(op)) {
6977 tcg_gen_st_f32(cpu_F0s, cpu_env,
6978 neon_reg_offset(rd, pass));
6980 neon_store_reg(rd, pass, tmp);
6985 } else if ((insn & (1 << 10)) == 0) {
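/* VTBL, VTBX: table lookup / lookup-with-extension. 'n' is the number of
 * list registers (1..4); bit 6 selects VTBX, which keeps the existing
 * destination bytes for out-of-range indices instead of zeroing them. */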
6987 int n = ((insn >> 8) & 3) + 1;
6988 if ((rn + n) > 32) {
6989 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6990 * helper function running off the end of the register file.
6995 if (insn & (1 << 6)) {
6996 tmp = neon_load_reg(rd, 0);
6998 tmp = tcg_temp_new_i32();
6999 tcg_gen_movi_i32(tmp, 0);
7001 tmp2 = neon_load_reg(rm, 0);
7002 tmp4 = tcg_const_i32(rn);
7003 tmp5 = tcg_const_i32(n);
7004 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7005 tcg_temp_free_i32(tmp);
7006 if (insn & (1 << 6)) {
7007 tmp = neon_load_reg(rd, 1);
7009 tmp = tcg_temp_new_i32();
7010 tcg_gen_movi_i32(tmp, 0);
7012 tmp3 = neon_load_reg(rm, 1);
7013 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7014 tcg_temp_free_i32(tmp5);
7015 tcg_temp_free_i32(tmp4);
7016 neon_store_reg(rd, 0, tmp2);
7017 neon_store_reg(rd, 1, tmp3);
7018 tcg_temp_free_i32(tmp);
7019 } else if ((insn & 0x380) == 0) {
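/* VDUP (scalar): replicate one element of Dm across every element of the
 * destination; bits [19:16] encode the element size and index. */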
7021 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7024 if (insn & (1 << 19)) {
7025 tmp = neon_load_reg(rm, 1);
7027 tmp = neon_load_reg(rm, 0);
7029 if (insn & (1 << 16)) {
7030 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7031 } else if (insn & (1 << 17)) {
7032 if ((insn >> 18) & 1)
7033 gen_neon_dup_high16(tmp);
7035 gen_neon_dup_low16(tmp);
7037 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7038 tmp2 = tcg_temp_new_i32();
7039 tcg_gen_mov_i32(tmp2, tmp);
7040 neon_store_reg(rd, pass, tmp2);
7042 tcg_temp_free_i32(tmp);
7051 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7053 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7054 const ARMCPRegInfo *ri;
7056 cpnum = (insn >> 8) & 0xf;
7058 /* First check for coprocessor space used for XScale/iwMMXt insns */
7059 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7060 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7063 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7064 return disas_iwmmxt_insn(s, insn);
7065 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7066 return disas_dsp_insn(s, insn);
7071 /* Otherwise treat as a generic register access */
7072 is64 = (insn & (1 << 25)) == 0;
7073 if (!is64 && ((insn & (1 << 4)) == 0)) {
7081 opc1 = (insn >> 4) & 0xf;
7083 rt2 = (insn >> 16) & 0xf;
7085 crn = (insn >> 16) & 0xf;
7086 opc1 = (insn >> 21) & 7;
7087 opc2 = (insn >> 5) & 7;
7090 isread = (insn >> 20) & 1;
7091 rt = (insn >> 12) & 0xf;
7093 ri = get_arm_cp_reginfo(s->cp_regs,
7094 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7096 /* Check access permissions */
7097 if (!cp_access_ok(s->current_el, ri, isread)) {
7102 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7103 /* Emit code to perform further access permissions checks at
7104 * runtime; this may result in an exception.
7105 * Note that on XScale all cp0..cp13 registers do an access check
7106 * call in order to handle c15_cpar.
7112 /* Note that since we are an implementation which takes an
7113 * exception on a trapped conditional instruction only if the
7114 * instruction passes its condition code check, we can take
7115 * advantage of the clause in the ARM ARM that allows us to set
7116 * the COND field in the instruction to 0xE in all cases.
7117 * We could fish the actual condition out of the insn (ARM)
7118 * or the condexec bits (Thumb) but it isn't necessary.
7123 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7126 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7127 rt, isread, s->thumb);
7132 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7135 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7136 rt, isread, s->thumb);
7140 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7141 * so this can only happen if this is an ARMv7 or earlier CPU,
7142 * in which case the syndrome information won't actually be guest visible.
7145 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7146 syndrome = syn_uncategorized();
7150 gen_set_pc_im(s, s->pc);
7151 tmpptr = tcg_const_ptr(ri);
7152 tcg_syn = tcg_const_i32(syndrome);
7153 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
7154 tcg_temp_free_ptr(tmpptr);
7155 tcg_temp_free_i32(tcg_syn);
7158 /* Handle special cases first */
7159 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7166 gen_set_pc_im(s, s->pc);
7167 s->is_jmp = DISAS_WFI;
7173 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7182 if (ri->type & ARM_CP_CONST) {
7183 tmp64 = tcg_const_i64(ri->resetvalue);
7184 } else if (ri->readfn) {
7186 tmp64 = tcg_temp_new_i64();
7187 tmpptr = tcg_const_ptr(ri);
7188 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7189 tcg_temp_free_ptr(tmpptr);
7191 tmp64 = tcg_temp_new_i64();
7192 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7194 tmp = tcg_temp_new_i32();
7195 tcg_gen_trunc_i64_i32(tmp, tmp64);
7196 store_reg(s, rt, tmp);
7197 tcg_gen_shri_i64(tmp64, tmp64, 32);
7198 tmp = tcg_temp_new_i32();
7199 tcg_gen_trunc_i64_i32(tmp, tmp64);
7200 tcg_temp_free_i64(tmp64);
7201 store_reg(s, rt2, tmp);
7204 if (ri->type & ARM_CP_CONST) {
7205 tmp = tcg_const_i32(ri->resetvalue);
7206 } else if (ri->readfn) {
7208 tmp = tcg_temp_new_i32();
7209 tmpptr = tcg_const_ptr(ri);
7210 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7211 tcg_temp_free_ptr(tmpptr);
7213 tmp = load_cpu_offset(ri->fieldoffset);
7216 /* Destination register of r15 for 32 bit loads sets
7217 * the condition codes from the high 4 bits of the value
7220 tcg_temp_free_i32(tmp);
7222 store_reg(s, rt, tmp);
7227 if (ri->type & ARM_CP_CONST) {
7228 /* If not forbidden by access permissions, treat as WI */
7233 TCGv_i32 tmplo, tmphi;
7234 TCGv_i64 tmp64 = tcg_temp_new_i64();
7235 tmplo = load_reg(s, rt);
7236 tmphi = load_reg(s, rt2);
7237 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7238 tcg_temp_free_i32(tmplo);
7239 tcg_temp_free_i32(tmphi);
7241 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7242 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7243 tcg_temp_free_ptr(tmpptr);
7245 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7247 tcg_temp_free_i64(tmp64);
7252 tmp = load_reg(s, rt);
7253 tmpptr = tcg_const_ptr(ri);
7254 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7255 tcg_temp_free_ptr(tmpptr);
7256 tcg_temp_free_i32(tmp);
7258 TCGv_i32 tmp = load_reg(s, rt);
7259 store_cpu_offset(tmp, ri->fieldoffset);
7264 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7265 /* I/O operations must end the TB here (whether read or write) */
7268 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7269 /* We default to ending the TB on a coprocessor register write,
7270 * but allow this to be suppressed by the register definition
7271 * (usually only necessary to work around guest bugs).
7279 /* Unknown register; this might be a guest error or a QEMU
7280 * unimplemented feature.
7283 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7284 "64 bit system register cp:%d opc1: %d crm:%d "
7286 isread ? "read" : "write", cpnum, opc1, crm,
7287 s->ns ? "non-secure" : "secure");
7289 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7290 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7292 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7293 s->ns ? "non-secure" : "secure");
7300 /* Store a 64-bit value to a register pair. Clobbers val. */
7301 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7304 tmp = tcg_temp_new_i32();
7305 tcg_gen_trunc_i64_i32(tmp, val);
7306 store_reg(s, rlow, tmp);
7307 tmp = tcg_temp_new_i32();
7308 tcg_gen_shri_i64(val, val, 32);
7309 tcg_gen_trunc_i64_i32(tmp, val);
7310 store_reg(s, rhigh, tmp);
7313 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7314 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7319 /* Load value and extend to 64 bits. */
7320 tmp = tcg_temp_new_i64();
7321 tmp2 = load_reg(s, rlow);
7322 tcg_gen_extu_i32_i64(tmp, tmp2);
7323 tcg_temp_free_i32(tmp2);
7324 tcg_gen_add_i64(val, val, tmp);
7325 tcg_temp_free_i64(tmp);
7328 /* load and add a 64-bit value from a register pair. */
7329 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7335 /* Load 64-bit value rd:rn. */
7336 tmpl = load_reg(s, rlow);
7337 tmph = load_reg(s, rhigh);
7338 tmp = tcg_temp_new_i64();
7339 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7340 tcg_temp_free_i32(tmpl);
7341 tcg_temp_free_i32(tmph);
7342 tcg_gen_add_i64(val, val, tmp);
7343 tcg_temp_free_i64(tmp);
7346 /* Set N and Z flags from hi|lo. */
7347 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7349 tcg_gen_mov_i32(cpu_NF, hi);
7350 tcg_gen_or_i32(cpu_ZF, lo, hi);
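/* Used by the flag-setting 64-bit multiply forms (e.g. UMULLS, SMLALS):
 * N comes from bit 63 and Z from the whole 64-bit result; C and V are
 * left unchanged. */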
7353 /* Load/Store exclusive instructions are implemented by remembering
7354 the value/address loaded, and seeing if these are the same
7355 when the store is performed. This should be sufficient to implement
7356 the architecturally mandated semantics, and avoids having to monitor
7359 In system emulation mode only one CPU will be running at once, so
7360 this sequence is effectively atomic. In user emulation mode we
7361 throw an exception and handle the atomic operation elsewhere. */
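/* For example, a guest atomic-increment sequence such as
 *     1: ldrex r1, [r0]
 *        add   r1, r1, #1
 *        strex r2, r1, [r0]
 *        cmp   r2, #0
 *        bne   1b
 * is translated so that the STREX succeeds only if cpu_exclusive_addr and
 * cpu_exclusive_val still match what the LDREX recorded (system emulation);
 * the user-mode path below instead raises EXCP_STREX and resolves the
 * store outside generated code. */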
7362 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7363 TCGv_i32 addr, int size)
7365 TCGv_i32 tmp = tcg_temp_new_i32();
7371 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7374 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7378 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7385 TCGv_i32 tmp2 = tcg_temp_new_i32();
7386 TCGv_i32 tmp3 = tcg_temp_new_i32();
7388 tcg_gen_addi_i32(tmp2, addr, 4);
7389 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7390 tcg_temp_free_i32(tmp2);
7391 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7392 store_reg(s, rt2, tmp3);
7394 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7397 store_reg(s, rt, tmp);
7398 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7401 static void gen_clrex(DisasContext *s)
7403 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7406 #ifdef CONFIG_USER_ONLY
7407 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7408 TCGv_i32 addr, int size)
7410 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
7411 tcg_gen_movi_i32(cpu_exclusive_info,
7412 size | (rd << 4) | (rt << 8) | (rt2 << 12));
7413 gen_exception_internal_insn(s, 4, EXCP_STREX);
7416 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7417 TCGv_i32 addr, int size)
7420 TCGv_i64 val64, extaddr;
7424 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { [addr] = {Rt}; {Rd} = 0; } else { {Rd} = 1; } */
7430 fail_label = gen_new_label();
7431 done_label = gen_new_label();
7432 extaddr = tcg_temp_new_i64();
7433 tcg_gen_extu_i32_i64(extaddr, addr);
7434 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7435 tcg_temp_free_i64(extaddr);
7437 tmp = tcg_temp_new_i32();
7440 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7443 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7447 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7453 val64 = tcg_temp_new_i64();
7455 TCGv_i32 tmp2 = tcg_temp_new_i32();
7456 TCGv_i32 tmp3 = tcg_temp_new_i32();
7457 tcg_gen_addi_i32(tmp2, addr, 4);
7458 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7459 tcg_temp_free_i32(tmp2);
7460 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7461 tcg_temp_free_i32(tmp3);
7463 tcg_gen_extu_i32_i64(val64, tmp);
7465 tcg_temp_free_i32(tmp);
7467 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7468 tcg_temp_free_i64(val64);
7470 tmp = load_reg(s, rt);
7473 gen_aa32_st8(tmp, addr, get_mem_index(s));
7476 gen_aa32_st16(tmp, addr, get_mem_index(s));
7480 gen_aa32_st32(tmp, addr, get_mem_index(s));
7485 tcg_temp_free_i32(tmp);
7487 tcg_gen_addi_i32(addr, addr, 4);
7488 tmp = load_reg(s, rt2);
7489 gen_aa32_st32(tmp, addr, get_mem_index(s));
7490 tcg_temp_free_i32(tmp);
7492 tcg_gen_movi_i32(cpu_R[rd], 0);
7493 tcg_gen_br(done_label);
7494 gen_set_label(fail_label);
7495 tcg_gen_movi_i32(cpu_R[rd], 1);
7496 gen_set_label(done_label);
7497 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7504 * @mode: mode field from insn (which stack to store to)
7505 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7506 * @writeback: true if writeback bit set
7508 * Generate code for the SRS (Store Return State) insn.
7510 static void gen_srs(DisasContext *s,
7511 uint32_t mode, uint32_t amode, bool writeback)
7514 TCGv_i32 addr = tcg_temp_new_i32();
7515 TCGv_i32 tmp = tcg_const_i32(mode);
7516 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7517 tcg_temp_free_i32(tmp);
7534 tcg_gen_addi_i32(addr, addr, offset);
7535 tmp = load_reg(s, 14);
7536 gen_aa32_st32(tmp, addr, get_mem_index(s));
7537 tcg_temp_free_i32(tmp);
7538 tmp = load_cpu_field(spsr);
7539 tcg_gen_addi_i32(addr, addr, 4);
7540 gen_aa32_st32(tmp, addr, get_mem_index(s));
7541 tcg_temp_free_i32(tmp);
7559 tcg_gen_addi_i32(addr, addr, offset);
7560 tmp = tcg_const_i32(mode);
7561 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7562 tcg_temp_free_i32(tmp);
7564 tcg_temp_free_i32(addr);
7567 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7569 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7576 /* M variants do not implement ARM mode. */
7577 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7582 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7583 * choose to UNDEF. In ARMv5 and above the space is used
7584 * for miscellaneous unconditional instructions.
7588 /* Unconditional instructions. */
7589 if (((insn >> 25) & 7) == 1) {
7590 /* NEON Data processing. */
7591 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7595 if (disas_neon_data_insn(s, insn)) {
7600 if ((insn & 0x0f100000) == 0x04000000) {
7601 /* NEON load/store. */
7602 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7606 if (disas_neon_ls_insn(s, insn)) {
7611 if ((insn & 0x0f000e10) == 0x0e000a00) {
7613 if (disas_vfp_insn(s, insn)) {
7618 if (((insn & 0x0f30f000) == 0x0510f000) ||
7619 ((insn & 0x0f30f010) == 0x0710f000)) {
7620 if ((insn & (1 << 22)) == 0) {
7622 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7626 /* Otherwise PLD; v5TE+ */
7630 if (((insn & 0x0f70f000) == 0x0450f000) ||
7631 ((insn & 0x0f70f010) == 0x0650f000)) {
7633 return; /* PLI; V7 */
7635 if (((insn & 0x0f700000) == 0x04100000) ||
7636 ((insn & 0x0f700010) == 0x06100000)) {
7637 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7640 return; /* v7MP: Unallocated memory hint: must NOP */
7643 if ((insn & 0x0ffffdff) == 0x01010000) {
7646 if (((insn >> 9) & 1) != s->bswap_code) {
7647 /* Dynamic endianness switching not implemented. */
7648 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
7652 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7653 switch ((insn >> 4) & 0xf) {
7662 /* We don't emulate caches so these are a no-op. */
7667 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7673 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7675 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7681 rn = (insn >> 16) & 0xf;
7682 addr = load_reg(s, rn);
7683 i = (insn >> 23) & 3;
7685 case 0: offset = -4; break; /* DA */
7686 case 1: offset = 0; break; /* IA */
7687 case 2: offset = -8; break; /* DB */
7688 case 3: offset = 4; break; /* IB */
7692 tcg_gen_addi_i32(addr, addr, offset);
7693 /* Load PC into tmp and CPSR into tmp2. */
7694 tmp = tcg_temp_new_i32();
7695 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7696 tcg_gen_addi_i32(addr, addr, 4);
7697 tmp2 = tcg_temp_new_i32();
7698 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
7699 if (insn & (1 << 21)) {
7700 /* Base writeback. */
7702 case 0: offset = -8; break;
7703 case 1: offset = 4; break;
7704 case 2: offset = -4; break;
7705 case 3: offset = 0; break;
7709 tcg_gen_addi_i32(addr, addr, offset);
7710 store_reg(s, rn, addr);
7712 tcg_temp_free_i32(addr);
7714 gen_rfe(s, tmp, tmp2);
7716 } else if ((insn & 0x0e000000) == 0x0a000000) {
7717 /* branch link and change to thumb (blx <offset>) */
7720 val = (uint32_t)s->pc;
7721 tmp = tcg_temp_new_i32();
7722 tcg_gen_movi_i32(tmp, val);
7723 store_reg(s, 14, tmp);
7724 /* Sign-extend the 24-bit offset */
7725 offset = (((int32_t)insn) << 8) >> 8;
7726 /* offset * 4 + bit24 * 2 + (thumb bit) */
7727 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7728 /* pipeline offset */
7730 /* protected by ARCH(5); above, near the start of uncond block */
7733 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7734 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7735 /* iWMMXt register transfer. */
7736 if (extract32(s->c15_cpar, 1, 1)) {
7737 if (!disas_iwmmxt_insn(s, insn)) {
7742 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7743 /* Coprocessor double register transfer. */
7745 } else if ((insn & 0x0f000010) == 0x0e000010) {
7746 /* Additional coprocessor register transfer. */
7747 } else if ((insn & 0x0ff10020) == 0x01000000) {
7750 /* cps (privileged) */
7754 if (insn & (1 << 19)) {
7755 if (insn & (1 << 8))
7757 if (insn & (1 << 7))
7759 if (insn & (1 << 6))
7761 if (insn & (1 << 18))
7764 if (insn & (1 << 17)) {
7766 val |= (insn & 0x1f);
7769 gen_set_psr_im(s, mask, 0, val);
7776 /* if not always execute, we generate a conditional jump to the next instruction */
7778 s->condlabel = gen_new_label();
7779 arm_gen_test_cc(cond ^ 1, s->condlabel);
7782 if ((insn & 0x0f900000) == 0x03000000) {
7783 if ((insn & (1 << 21)) == 0) {
7785 rd = (insn >> 12) & 0xf;
7786 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7787 if ((insn & (1 << 22)) == 0) {
7789 tmp = tcg_temp_new_i32();
7790 tcg_gen_movi_i32(tmp, val);
7793 tmp = load_reg(s, rd);
7794 tcg_gen_ext16u_i32(tmp, tmp);
7795 tcg_gen_ori_i32(tmp, tmp, val << 16);
7797 store_reg(s, rd, tmp);
7799 if (((insn >> 12) & 0xf) != 0xf)
7801 if (((insn >> 16) & 0xf) == 0) {
7802 gen_nop_hint(s, insn & 0xff);
7804 /* CPSR = immediate */
7806 shift = ((insn >> 8) & 0xf) * 2;
7808 val = (val >> shift) | (val << (32 - shift));
7809 i = ((insn & (1 << 22)) != 0);
7810 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7816 } else if ((insn & 0x0f900000) == 0x01000000
7817 && (insn & 0x00000090) != 0x00000090) {
7818 /* miscellaneous instructions */
7819 op1 = (insn >> 21) & 3;
7820 sh = (insn >> 4) & 0xf;
7823 case 0x0: /* move program status register */
7826 tmp = load_reg(s, rm);
7827 i = ((op1 & 2) != 0);
7828 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
7832 rd = (insn >> 12) & 0xf;
7836 tmp = load_cpu_field(spsr);
7838 tmp = tcg_temp_new_i32();
7839 gen_helper_cpsr_read(tmp, cpu_env);
7841 store_reg(s, rd, tmp);
7846 /* branch/exchange thumb (bx). */
7848 tmp = load_reg(s, rm);
7850 } else if (op1 == 3) {
7853 rd = (insn >> 12) & 0xf;
7854 tmp = load_reg(s, rm);
7855 gen_helper_clz(tmp, tmp);
7856 store_reg(s, rd, tmp);
7864 /* Trivial implementation equivalent to bx. */
7865 tmp = load_reg(s, rm);
7876 /* branch link/exchange thumb (blx) */
7877 tmp = load_reg(s, rm);
7878 tmp2 = tcg_temp_new_i32();
7879 tcg_gen_movi_i32(tmp2, s->pc);
7880 store_reg(s, 14, tmp2);
7886 uint32_t c = extract32(insn, 8, 4);
7888 /* Check that this CPU supports ARMv8 CRC instructions.
7889 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7890 * Bits 8, 10 and 11 should be zero.
7892 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
7897 rn = extract32(insn, 16, 4);
7898 rd = extract32(insn, 12, 4);
7900 tmp = load_reg(s, rn);
7901 tmp2 = load_reg(s, rm);
7903 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7904 } else if (op1 == 1) {
7905 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7907 tmp3 = tcg_const_i32(1 << op1);
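/* The final helper argument is the number of bytes to process: 1, 2 or 4
 * for CRC32B/H/W respectively (op1 is the log2 of the size). */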
7909 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7911 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7913 tcg_temp_free_i32(tmp2);
7914 tcg_temp_free_i32(tmp3);
7915 store_reg(s, rd, tmp);
7918 case 0x5: /* saturating add/subtract */
7920 rd = (insn >> 12) & 0xf;
7921 rn = (insn >> 16) & 0xf;
7922 tmp = load_reg(s, rm);
7923 tmp2 = load_reg(s, rn);
7925 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
7927 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
7929 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7930 tcg_temp_free_i32(tmp2);
7931 store_reg(s, rd, tmp);
7935 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
7940 gen_exception_insn(s, 4, EXCP_BKPT,
7941 syn_aa32_bkpt(imm16, false));
7944 /* Hypervisor call (v7) */
7952 /* Secure monitor call (v6+) */
7964 case 0x8: /* signed multiply */
7969 rs = (insn >> 8) & 0xf;
7970 rn = (insn >> 12) & 0xf;
7971 rd = (insn >> 16) & 0xf;
7973 /* (32 * 16) >> 16 */
7974 tmp = load_reg(s, rm);
7975 tmp2 = load_reg(s, rs);
7977 tcg_gen_sari_i32(tmp2, tmp2, 16);
7980 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7981 tcg_gen_shri_i64(tmp64, tmp64, 16);
7982 tmp = tcg_temp_new_i32();
7983 tcg_gen_trunc_i64_i32(tmp, tmp64);
7984 tcg_temp_free_i64(tmp64);
7985 if ((sh & 2) == 0) {
7986 tmp2 = load_reg(s, rn);
7987 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7988 tcg_temp_free_i32(tmp2);
7990 store_reg(s, rd, tmp);
7993 tmp = load_reg(s, rm);
7994 tmp2 = load_reg(s, rs);
7995 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7996 tcg_temp_free_i32(tmp2);
7998 tmp64 = tcg_temp_new_i64();
7999 tcg_gen_ext_i32_i64(tmp64, tmp);
8000 tcg_temp_free_i32(tmp);
8001 gen_addq(s, tmp64, rn, rd);
8002 gen_storeq_reg(s, rn, rd, tmp64);
8003 tcg_temp_free_i64(tmp64);
8006 tmp2 = load_reg(s, rn);
8007 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8008 tcg_temp_free_i32(tmp2);
8010 store_reg(s, rd, tmp);
8017 } else if (((insn & 0x0e000000) == 0 &&
8018 (insn & 0x00000090) != 0x90) ||
8019 ((insn & 0x0e000000) == (1 << 25))) {
8020 int set_cc, logic_cc, shiftop;
8022 op1 = (insn >> 21) & 0xf;
8023 set_cc = (insn >> 20) & 1;
8024 logic_cc = table_logic_cc[op1] & set_cc;
8026 /* data processing instruction */
8027 if (insn & (1 << 25)) {
8028 /* immediate operand */
8030 shift = ((insn >> 8) & 0xf) * 2;
8032 val = (val >> shift) | (val << (32 - shift));
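/* i.e. the standard ARM modified immediate: an 8-bit value rotated
   right by twice the 4-bit rotate field. */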
8034 tmp2 = tcg_temp_new_i32();
8035 tcg_gen_movi_i32(tmp2, val);
8036 if (logic_cc && shift) {
8037 gen_set_CF_bit31(tmp2);
8042 tmp2 = load_reg(s, rm);
8043 shiftop = (insn >> 5) & 3;
8044 if (!(insn & (1 << 4))) {
8045 shift = (insn >> 7) & 0x1f;
8046 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8048 rs = (insn >> 8) & 0xf;
8049 tmp = load_reg(s, rs);
8050 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8053 if (op1 != 0x0f && op1 != 0x0d) {
8054 rn = (insn >> 16) & 0xf;
8055 tmp = load_reg(s, rn);
8057 TCGV_UNUSED_I32(tmp);
8059 rd = (insn >> 12) & 0xf;
8062 tcg_gen_and_i32(tmp, tmp, tmp2);
8066 store_reg_bx(s, rd, tmp);
8069 tcg_gen_xor_i32(tmp, tmp, tmp2);
8073 store_reg_bx(s, rd, tmp);
8076 if (set_cc && rd == 15) {
8077 /* SUBS r15, ... is used for exception return. */
8081 gen_sub_CC(tmp, tmp, tmp2);
8082 gen_exception_return(s, tmp);
8085 gen_sub_CC(tmp, tmp, tmp2);
8087 tcg_gen_sub_i32(tmp, tmp, tmp2);
8089 store_reg_bx(s, rd, tmp);
8094 gen_sub_CC(tmp, tmp2, tmp);
8096 tcg_gen_sub_i32(tmp, tmp2, tmp);
8098 store_reg_bx(s, rd, tmp);
8102 gen_add_CC(tmp, tmp, tmp2);
8104 tcg_gen_add_i32(tmp, tmp, tmp2);
8106 store_reg_bx(s, rd, tmp);
8110 gen_adc_CC(tmp, tmp, tmp2);
8112 gen_add_carry(tmp, tmp, tmp2);
8114 store_reg_bx(s, rd, tmp);
8118 gen_sbc_CC(tmp, tmp, tmp2);
8120 gen_sub_carry(tmp, tmp, tmp2);
8122 store_reg_bx(s, rd, tmp);
8126 gen_sbc_CC(tmp, tmp2, tmp);
8128 gen_sub_carry(tmp, tmp2, tmp);
8130 store_reg_bx(s, rd, tmp);
8134 tcg_gen_and_i32(tmp, tmp, tmp2);
8137 tcg_temp_free_i32(tmp);
8141 tcg_gen_xor_i32(tmp, tmp, tmp2);
8144 tcg_temp_free_i32(tmp);
8148 gen_sub_CC(tmp, tmp, tmp2);
8150 tcg_temp_free_i32(tmp);
8154 gen_add_CC(tmp, tmp, tmp2);
8156 tcg_temp_free_i32(tmp);
8159 tcg_gen_or_i32(tmp, tmp, tmp2);
8163 store_reg_bx(s, rd, tmp);
8166 if (logic_cc && rd == 15) {
8167 /* MOVS r15, ... is used for exception return. */
8171 gen_exception_return(s, tmp2);
8176 store_reg_bx(s, rd, tmp2);
8180 tcg_gen_andc_i32(tmp, tmp, tmp2);
8184 store_reg_bx(s, rd, tmp);
8188 tcg_gen_not_i32(tmp2, tmp2);
8192 store_reg_bx(s, rd, tmp2);
8195 if (op1 != 0x0f && op1 != 0x0d) {
8196 tcg_temp_free_i32(tmp2);
8199 /* other instructions */
8200 op1 = (insn >> 24) & 0xf;
8204 /* multiplies, extra load/stores */
8205 sh = (insn >> 5) & 3;
8208 rd = (insn >> 16) & 0xf;
8209 rn = (insn >> 12) & 0xf;
8210 rs = (insn >> 8) & 0xf;
8212 op1 = (insn >> 20) & 0xf;
8214 case 0: case 1: case 2: case 3: case 6:
8216 tmp = load_reg(s, rs);
8217 tmp2 = load_reg(s, rm);
8218 tcg_gen_mul_i32(tmp, tmp, tmp2);
8219 tcg_temp_free_i32(tmp2);
8220 if (insn & (1 << 22)) {
8221 /* Subtract (mls) */
8223 tmp2 = load_reg(s, rn);
8224 tcg_gen_sub_i32(tmp, tmp2, tmp);
8225 tcg_temp_free_i32(tmp2);
8226 } else if (insn & (1 << 21)) {
8228 tmp2 = load_reg(s, rn);
8229 tcg_gen_add_i32(tmp, tmp, tmp2);
8230 tcg_temp_free_i32(tmp2);
8232 if (insn & (1 << 20))
8234 store_reg(s, rd, tmp);
8237 /* 64 bit mul double accumulate (UMAAL) */
8239 tmp = load_reg(s, rs);
8240 tmp2 = load_reg(s, rm);
8241 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8242 gen_addq_lo(s, tmp64, rn);
8243 gen_addq_lo(s, tmp64, rd);
8244 gen_storeq_reg(s, rn, rd, tmp64);
8245 tcg_temp_free_i64(tmp64);
8247 case 8: case 9: case 10: case 11:
8248 case 12: case 13: case 14: case 15:
8249 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8250 tmp = load_reg(s, rs);
8251 tmp2 = load_reg(s, rm);
8252 if (insn & (1 << 22)) {
8253 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8255 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8257 if (insn & (1 << 21)) { /* mult accumulate */
8258 TCGv_i32 al = load_reg(s, rn);
8259 TCGv_i32 ah = load_reg(s, rd);
8260 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8261 tcg_temp_free_i32(al);
8262 tcg_temp_free_i32(ah);
8264 if (insn & (1 << 20)) {
8265 gen_logicq_cc(tmp, tmp2);
8267 store_reg(s, rn, tmp);
8268 store_reg(s, rd, tmp2);
8274 rn = (insn >> 16) & 0xf;
8275 rd = (insn >> 12) & 0xf;
8276 if (insn & (1 << 23)) {
8277 /* load/store exclusive */
8278 int op2 = (insn >> 8) & 3;
8279 op1 = (insn >> 21) & 0x3;
8282 case 0: /* lda/stl */
8288 case 1: /* reserved */
8290 case 2: /* ldaex/stlex */
8293 case 3: /* ldrex/strex */
8302 addr = tcg_temp_local_new_i32();
8303 load_reg_var(s, addr, rn);
8305 /* Since the emulation does not have barriers,
8306 the acquire/release semantics need no special
8309 if (insn & (1 << 20)) {
8310 tmp = tcg_temp_new_i32();
8313 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8316 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8319 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8324 store_reg(s, rd, tmp);
8327 tmp = load_reg(s, rm);
8330 gen_aa32_st32(tmp, addr, get_mem_index(s));
8333 gen_aa32_st8(tmp, addr, get_mem_index(s));
8336 gen_aa32_st16(tmp, addr, get_mem_index(s));
8341 tcg_temp_free_i32(tmp);
8343 } else if (insn & (1 << 20)) {
8346 gen_load_exclusive(s, rd, 15, addr, 2);
8348 case 1: /* ldrexd */
8349 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8351 case 2: /* ldrexb */
8352 gen_load_exclusive(s, rd, 15, addr, 0);
8354 case 3: /* ldrexh */
8355 gen_load_exclusive(s, rd, 15, addr, 1);
8364 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8366 case 1: /* strexd */
8367 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8369 case 2: /* strexb */
8370 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8372 case 3: /* strexh */
8373 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8379 tcg_temp_free_i32(addr);
8381 /* SWP instruction */
8384 /* ??? This is not really atomic. However we know
8385 we never have multiple CPUs running in parallel,
8386 so it is good enough. */
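/* SWP{B}: read the old value from [Rn], store Rm to [Rn] and return
   the old value in Rd; bit 22 selects byte vs word access. */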
8387 addr = load_reg(s, rn);
8388 tmp = load_reg(s, rm);
8389 tmp2 = tcg_temp_new_i32();
8390 if (insn & (1 << 22)) {
8391 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8392 gen_aa32_st8(tmp, addr, get_mem_index(s));
8394 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8395 gen_aa32_st32(tmp, addr, get_mem_index(s));
8397 tcg_temp_free_i32(tmp);
8398 tcg_temp_free_i32(addr);
8399 store_reg(s, rd, tmp2);
8405 /* Misc load/store */
8406 rn = (insn >> 16) & 0xf;
8407 rd = (insn >> 12) & 0xf;
8408 addr = load_reg(s, rn);
8409 if (insn & (1 << 24))
8410 gen_add_datah_offset(s, insn, 0, addr);
8412 if (insn & (1 << 20)) {
8414 tmp = tcg_temp_new_i32();
8417 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8420 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
8424 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
8428 } else if (sh & 2) {
8433 tmp = load_reg(s, rd);
8434 gen_aa32_st32(tmp, addr, get_mem_index(s));
8435 tcg_temp_free_i32(tmp);
8436 tcg_gen_addi_i32(addr, addr, 4);
8437 tmp = load_reg(s, rd + 1);
8438 gen_aa32_st32(tmp, addr, get_mem_index(s));
8439 tcg_temp_free_i32(tmp);
8443 tmp = tcg_temp_new_i32();
8444 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8445 store_reg(s, rd, tmp);
8446 tcg_gen_addi_i32(addr, addr, 4);
8447 tmp = tcg_temp_new_i32();
8448 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8452 address_offset = -4;
8455 tmp = load_reg(s, rd);
8456 gen_aa32_st16(tmp, addr, get_mem_index(s));
8457 tcg_temp_free_i32(tmp);
8460 /* Perform base writeback before storing the loaded value to
8461 ensure correct behavior with overlapping index registers.
8462 ldrd with base writeback is undefined if the
8463 destination and index registers overlap. */
8464 if (!(insn & (1 << 24))) {
8465 gen_add_datah_offset(s, insn, address_offset, addr);
8466 store_reg(s, rn, addr);
8467 } else if (insn & (1 << 21)) {
8469 tcg_gen_addi_i32(addr, addr, address_offset);
8470 store_reg(s, rn, addr);
8472 tcg_temp_free_i32(addr);
8475 /* Complete the load. */
8476 store_reg(s, rd, tmp);
8485 if (insn & (1 << 4)) {
8487 /* ARMv6 media instructions. */
8489 rn = (insn >> 16) & 0xf;
8490 rd = (insn >> 12) & 0xf;
8491 rs = (insn >> 8) & 0xf;
8492 switch ((insn >> 23) & 3) {
8493 case 0: /* Parallel add/subtract. */
8494 op1 = (insn >> 20) & 7;
8495 tmp = load_reg(s, rn);
8496 tmp2 = load_reg(s, rm);
8497 sh = (insn >> 5) & 7;
8498 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8500 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8501 tcg_temp_free_i32(tmp2);
8502 store_reg(s, rd, tmp);
8505 if ((insn & 0x00700020) == 0) {
8506 /* Halfword pack. */
8507 tmp = load_reg(s, rn);
8508 tmp2 = load_reg(s, rm);
8509 shift = (insn >> 7) & 0x1f;
8510 if (insn & (1 << 6)) {
8514 tcg_gen_sari_i32(tmp2, tmp2, shift);
8515 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8516 tcg_gen_ext16u_i32(tmp2, tmp2);
8520 tcg_gen_shli_i32(tmp2, tmp2, shift);
8521 tcg_gen_ext16u_i32(tmp, tmp);
8522 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8524 tcg_gen_or_i32(tmp, tmp, tmp2);
8525 tcg_temp_free_i32(tmp2);
8526 store_reg(s, rd, tmp);
8527 } else if ((insn & 0x00200020) == 0x00200000) {
8529 tmp = load_reg(s, rm);
8530 shift = (insn >> 7) & 0x1f;
8531 if (insn & (1 << 6)) {
8534 tcg_gen_sari_i32(tmp, tmp, shift);
8536 tcg_gen_shli_i32(tmp, tmp, shift);
8538 sh = (insn >> 16) & 0x1f;
8539 tmp2 = tcg_const_i32(sh);
8540 if (insn & (1 << 22))
8541 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8543 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8544 tcg_temp_free_i32(tmp2);
8545 store_reg(s, rd, tmp);
8546 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8548 tmp = load_reg(s, rm);
8549 sh = (insn >> 16) & 0x1f;
8550 tmp2 = tcg_const_i32(sh);
8551 if (insn & (1 << 22))
8552 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8554 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8555 tcg_temp_free_i32(tmp2);
8556 store_reg(s, rd, tmp);
8557 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8559 tmp = load_reg(s, rn);
8560 tmp2 = load_reg(s, rm);
8561 tmp3 = tcg_temp_new_i32();
8562 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8563 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8564 tcg_temp_free_i32(tmp3);
8565 tcg_temp_free_i32(tmp2);
8566 store_reg(s, rd, tmp);
8567 } else if ((insn & 0x000003e0) == 0x00000060) {
8568 tmp = load_reg(s, rm);
8569 shift = (insn >> 10) & 3;
8570 /* ??? In many cases it's not necessary to do a
8571 rotate; a shift is sufficient. */
8573 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8574 op1 = (insn >> 20) & 7;
8576 case 0: gen_sxtb16(tmp); break;
8577 case 2: gen_sxtb(tmp); break;
8578 case 3: gen_sxth(tmp); break;
8579 case 4: gen_uxtb16(tmp); break;
8580 case 6: gen_uxtb(tmp); break;
8581 case 7: gen_uxth(tmp); break;
8582 default: goto illegal_op;
8585 tmp2 = load_reg(s, rn);
8586 if ((op1 & 3) == 0) {
8587 gen_add16(tmp, tmp2);
8589 tcg_gen_add_i32(tmp, tmp, tmp2);
8590 tcg_temp_free_i32(tmp2);
8593 store_reg(s, rd, tmp);
8594 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8596 tmp = load_reg(s, rm);
8597 if (insn & (1 << 22)) {
8598 if (insn & (1 << 7)) {
8602 gen_helper_rbit(tmp, tmp);
8605 if (insn & (1 << 7))
8608 tcg_gen_bswap32_i32(tmp, tmp);
8610 store_reg(s, rd, tmp);
8615 case 2: /* Multiplies (Type 3). */
8616 switch ((insn >> 20) & 0x7) {
8618 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8619 /* op2 not 00x or 11x : UNDEF */
8622 /* Signed multiply most significant [accumulate].
8623 (SMMUL, SMMLA, SMMLS) */
8624 tmp = load_reg(s, rm);
8625 tmp2 = load_reg(s, rs);
8626 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8629 tmp = load_reg(s, rd);
8630 if (insn & (1 << 6)) {
8631 tmp64 = gen_subq_msw(tmp64, tmp);
8633 tmp64 = gen_addq_msw(tmp64, tmp);
8636 if (insn & (1 << 5)) {
8637 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8639 tcg_gen_shri_i64(tmp64, tmp64, 32);
8640 tmp = tcg_temp_new_i32();
8641 tcg_gen_trunc_i64_i32(tmp, tmp64);
8642 tcg_temp_free_i64(tmp64);
8643 store_reg(s, rn, tmp);
8647 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8648 if (insn & (1 << 7)) {
8651 tmp = load_reg(s, rm);
8652 tmp2 = load_reg(s, rs);
8653 if (insn & (1 << 5))
8654 gen_swap_half(tmp2);
8655 gen_smul_dual(tmp, tmp2);
8656 if (insn & (1 << 22)) {
8657 /* smlald, smlsld */
8660 tmp64 = tcg_temp_new_i64();
8661 tmp64_2 = tcg_temp_new_i64();
8662 tcg_gen_ext_i32_i64(tmp64, tmp);
8663 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8664 tcg_temp_free_i32(tmp);
8665 tcg_temp_free_i32(tmp2);
8666 if (insn & (1 << 6)) {
8667 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8669 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8671 tcg_temp_free_i64(tmp64_2);
8672 gen_addq(s, tmp64, rd, rn);
8673 gen_storeq_reg(s, rd, rn, tmp64);
8674 tcg_temp_free_i64(tmp64);
8676 /* smuad, smusd, smlad, smlsd */
8677 if (insn & (1 << 6)) {
8678 /* This subtraction cannot overflow. */
8679 tcg_gen_sub_i32(tmp, tmp, tmp2);
8681 /* This addition cannot overflow 32 bits;
8682 * however it may overflow when considered as a
8683 * signed operation, in which case we must set
8686 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8688 tcg_temp_free_i32(tmp2);
8691 tmp2 = load_reg(s, rd);
8692 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8693 tcg_temp_free_i32(tmp2);
8695 store_reg(s, rn, tmp);
8701 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
8704 if (((insn >> 5) & 7) || (rd != 15)) {
8707 tmp = load_reg(s, rm);
8708 tmp2 = load_reg(s, rs);
8709 if (insn & (1 << 21)) {
8710 gen_helper_udiv(tmp, tmp, tmp2);
8712 gen_helper_sdiv(tmp, tmp, tmp2);
8714 tcg_temp_free_i32(tmp2);
8715 store_reg(s, rn, tmp);
8722 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
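/* op1 combines insn[22:20] and insn[7:5] into a single opcode index
   for the switch below. */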
8724 case 0: /* Unsigned sum of absolute differences. */
8726 tmp = load_reg(s, rm);
8727 tmp2 = load_reg(s, rs);
8728 gen_helper_usad8(tmp, tmp, tmp2);
8729 tcg_temp_free_i32(tmp2);
8731 tmp2 = load_reg(s, rd);
8732 tcg_gen_add_i32(tmp, tmp, tmp2);
8733 tcg_temp_free_i32(tmp2);
8735 store_reg(s, rn, tmp);
8737 case 0x20: case 0x24: case 0x28: case 0x2c:
8738 /* Bitfield insert/clear. */
8740 shift = (insn >> 7) & 0x1f;
8741 i = (insn >> 16) & 0x1f;
8744 tmp = tcg_temp_new_i32();
8745 tcg_gen_movi_i32(tmp, 0);
8747 tmp = load_reg(s, rm);
8750 tmp2 = load_reg(s, rd);
8751 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
8752 tcg_temp_free_i32(tmp2);
8754 store_reg(s, rd, tmp);
8756 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8757 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
8759 tmp = load_reg(s, rm);
8760 shift = (insn >> 7) & 0x1f;
8761 i = ((insn >> 16) & 0x1f) + 1;
8766 gen_ubfx(tmp, shift, (1u << i) - 1);
8768 gen_sbfx(tmp, shift, i);
8771 store_reg(s, rd, tmp);
8781 /* Check for undefined extension instructions
8782 * per the ARM Bible, i.e.:
8783 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8785 sh = (0xf << 20) | (0xf << 4);
8786 if (op1 == 0x7 && ((insn & sh) == sh))
8790 /* load/store byte/word */
8791 rn = (insn >> 16) & 0xf;
8792 rd = (insn >> 12) & 0xf;
8793 tmp2 = load_reg(s, rn);
8794 if ((insn & 0x01200000) == 0x00200000) {
8798 i = get_mem_index(s);
8800 if (insn & (1 << 24))
8801 gen_add_data_offset(s, insn, tmp2);
8802 if (insn & (1 << 20)) {
8804 tmp = tcg_temp_new_i32();
8805 if (insn & (1 << 22)) {
8806 gen_aa32_ld8u(tmp, tmp2, i);
8808 gen_aa32_ld32u(tmp, tmp2, i);
8812 tmp = load_reg(s, rd);
8813 if (insn & (1 << 22)) {
8814 gen_aa32_st8(tmp, tmp2, i);
8816 gen_aa32_st32(tmp, tmp2, i);
8818 tcg_temp_free_i32(tmp);
8820 if (!(insn & (1 << 24))) {
8821 gen_add_data_offset(s, insn, tmp2);
8822 store_reg(s, rn, tmp2);
8823 } else if (insn & (1 << 21)) {
8824 store_reg(s, rn, tmp2);
8826 tcg_temp_free_i32(tmp2);
8828 if (insn & (1 << 20)) {
8829 /* Complete the load. */
8830 store_reg_from_load(s, rd, tmp);
8836 int j, n, user, loaded_base;
8837 TCGv_i32 loaded_var;
8838 /* load/store multiple words */
8839 /* XXX: store correct base if write back */
8841 if (insn & (1 << 22)) {
8843 goto illegal_op; /* only usable in supervisor mode */
8845 if ((insn & (1 << 15)) == 0)
8848 rn = (insn >> 16) & 0xf;
8849 addr = load_reg(s, rn);
8851 /* compute total size */
8853 TCGV_UNUSED_I32(loaded_var);
8856 if (insn & (1 << i))
8859 /* XXX: test invalid n == 0 case ? */
8860 if (insn & (1 << 23)) {
8861 if (insn & (1 << 24)) {
8863 tcg_gen_addi_i32(addr, addr, 4);
8865 /* post increment */
8868 if (insn & (1 << 24)) {
8870 tcg_gen_addi_i32(addr, addr, -(n * 4));
8872 /* post decrement */
8874 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8879 if (insn & (1 << i)) {
8880 if (insn & (1 << 20)) {
8882 tmp = tcg_temp_new_i32();
8883 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8885 tmp2 = tcg_const_i32(i);
8886 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
8887 tcg_temp_free_i32(tmp2);
8888 tcg_temp_free_i32(tmp);
8889 } else if (i == rn) {
8893 store_reg_from_load(s, i, tmp);
8898 /* special case: r15 = PC + 8 */
8899 val = (long)s->pc + 4;
8900 tmp = tcg_temp_new_i32();
8901 tcg_gen_movi_i32(tmp, val);
8903 tmp = tcg_temp_new_i32();
8904 tmp2 = tcg_const_i32(i);
8905 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
8906 tcg_temp_free_i32(tmp2);
8908 tmp = load_reg(s, i);
8910 gen_aa32_st32(tmp, addr, get_mem_index(s));
8911 tcg_temp_free_i32(tmp);
8914 /* no need to add after the last transfer */
8916 tcg_gen_addi_i32(addr, addr, 4);
8919 if (insn & (1 << 21)) {
8921 if (insn & (1 << 23)) {
8922 if (insn & (1 << 24)) {
8925 /* post increment */
8926 tcg_gen_addi_i32(addr, addr, 4);
8929 if (insn & (1 << 24)) {
8932 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8934 /* post decrement */
8935 tcg_gen_addi_i32(addr, addr, -(n * 4));
8938 store_reg(s, rn, addr);
8940 tcg_temp_free_i32(addr);
8943 store_reg(s, rn, loaded_var);
8945 if ((insn & (1 << 22)) && !user) {
8946 /* Restore CPSR from SPSR. */
8947 tmp = load_cpu_field(spsr);
8948 gen_set_cpsr(tmp, CPSR_ERET_MASK);
8949 tcg_temp_free_i32(tmp);
8950 s->is_jmp = DISAS_UPDATE;
8959 /* branch (and link) */
8960 val = (int32_t)s->pc;
8961 if (insn & (1 << 24)) {
8962 tmp = tcg_temp_new_i32();
8963 tcg_gen_movi_i32(tmp, val);
8964 store_reg(s, 14, tmp);
8966 offset = sextract32(insn << 2, 0, 26);
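/* Shifting the instruction left by 2 scales the 24-bit word offset in
   insn[23:0] to bytes; sextract32 then sign-extends the 26-bit result. */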
8974 if (((insn >> 8) & 0xe) == 10) {
8976 if (disas_vfp_insn(s, insn)) {
8979 } else if (disas_coproc_insn(s, insn)) {
8986 gen_set_pc_im(s, s->pc);
8987 s->svc_imm = extract32(insn, 0, 24);
8988 s->is_jmp = DISAS_SWI;
8992 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
8998 /* Return true if this is a Thumb-2 logical op. */
9000 thumb2_logic_op(int op)
9005 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9006 then set condition code flags based on the result of the operation.
9007 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9008 to the high bit of T1.
9009 Returns zero if the opcode is valid. */
9012 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9013 TCGv_i32 t0, TCGv_i32 t1)
9020 tcg_gen_and_i32(t0, t0, t1);
9024 tcg_gen_andc_i32(t0, t0, t1);
9028 tcg_gen_or_i32(t0, t0, t1);
9032 tcg_gen_orc_i32(t0, t0, t1);
9036 tcg_gen_xor_i32(t0, t0, t1);
9041 gen_add_CC(t0, t0, t1);
9043 tcg_gen_add_i32(t0, t0, t1);
9047 gen_adc_CC(t0, t0, t1);
9053 gen_sbc_CC(t0, t0, t1);
9055 gen_sub_carry(t0, t0, t1);
9060 gen_sub_CC(t0, t0, t1);
9062 tcg_gen_sub_i32(t0, t0, t1);
9066 gen_sub_CC(t0, t1, t0);
9068 tcg_gen_sub_i32(t0, t1, t0);
9070 default: /* 5, 6, 7, 9, 12, 15. */
9076 gen_set_CF_bit31(t1);
9081 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9083 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9085 uint32_t insn, imm, shift, offset;
9086 uint32_t rd, rn, rm, rs;
9097 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9098 || arm_dc_feature(s, ARM_FEATURE_M))) {
9099 /* Thumb-1 cores may need to treat bl and blx as a pair of
9100 16-bit instructions to get correct prefetch abort behavior. */
9102 if ((insn & (1 << 12)) == 0) {
9104 /* Second half of blx. */
9105 offset = ((insn & 0x7ff) << 1);
9106 tmp = load_reg(s, 14);
9107 tcg_gen_addi_i32(tmp, tmp, offset);
9108 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9110 tmp2 = tcg_temp_new_i32();
9111 tcg_gen_movi_i32(tmp2, s->pc | 1);
9112 store_reg(s, 14, tmp2);
9116 if (insn & (1 << 11)) {
9117 /* Second half of bl. */
9118 offset = ((insn & 0x7ff) << 1) | 1;
9119 tmp = load_reg(s, 14);
9120 tcg_gen_addi_i32(tmp, tmp, offset);
9122 tmp2 = tcg_temp_new_i32();
9123 tcg_gen_movi_i32(tmp2, s->pc | 1);
9124 store_reg(s, 14, tmp2);
9128 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9129 /* Instruction spans a page boundary. Implement it as two
9130 16-bit instructions in case the second half causes a
9132 offset = ((int32_t)insn << 21) >> 9;
9133 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
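/* First half of a Thumb bl/blx pair: r14 temporarily holds
   PC + sign_extend(imm11) << 12; the "second half" cases above add
   their own imm11 << 1, branch, and write the real return address. */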
9136 /* Fall through to 32-bit decode. */
9139 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9141 insn |= (uint32_t)insn_hw1 << 16;
9143 if ((insn & 0xf800e800) != 0xf000e800) {
9147 rn = (insn >> 16) & 0xf;
9148 rs = (insn >> 12) & 0xf;
9149 rd = (insn >> 8) & 0xf;
9151 switch ((insn >> 25) & 0xf) {
9152 case 0: case 1: case 2: case 3:
9153 /* 16-bit instructions. Should never happen. */
9156 if (insn & (1 << 22)) {
9157 /* Other load/store, table branch. */
9158 if (insn & 0x01200000) {
9159 /* Load/store doubleword. */
9161 addr = tcg_temp_new_i32();
9162 tcg_gen_movi_i32(addr, s->pc & ~3);
9164 addr = load_reg(s, rn);
9166 offset = (insn & 0xff) * 4;
9167 if ((insn & (1 << 23)) == 0)
9169 if (insn & (1 << 24)) {
9170 tcg_gen_addi_i32(addr, addr, offset);
9173 if (insn & (1 << 20)) {
9175 tmp = tcg_temp_new_i32();
9176 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9177 store_reg(s, rs, tmp);
9178 tcg_gen_addi_i32(addr, addr, 4);
9179 tmp = tcg_temp_new_i32();
9180 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9181 store_reg(s, rd, tmp);
9184 tmp = load_reg(s, rs);
9185 gen_aa32_st32(tmp, addr, get_mem_index(s));
9186 tcg_temp_free_i32(tmp);
9187 tcg_gen_addi_i32(addr, addr, 4);
9188 tmp = load_reg(s, rd);
9189 gen_aa32_st32(tmp, addr, get_mem_index(s));
9190 tcg_temp_free_i32(tmp);
9192 if (insn & (1 << 21)) {
9193 /* Base writeback. */
9196 tcg_gen_addi_i32(addr, addr, offset - 4);
9197 store_reg(s, rn, addr);
9199 tcg_temp_free_i32(addr);
9201 } else if ((insn & (1 << 23)) == 0) {
9202 /* Load/store exclusive word. */
9203 addr = tcg_temp_local_new_i32();
9204 load_reg_var(s, addr, rn);
9205 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9206 if (insn & (1 << 20)) {
9207 gen_load_exclusive(s, rs, 15, addr, 2);
9209 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9211 tcg_temp_free_i32(addr);
9212 } else if ((insn & (7 << 5)) == 0) {
9215 addr = tcg_temp_new_i32();
9216 tcg_gen_movi_i32(addr, s->pc);
9218 addr = load_reg(s, rn);
9220 tmp = load_reg(s, rm);
9221 tcg_gen_add_i32(addr, addr, tmp);
9222 if (insn & (1 << 4)) {
9224 tcg_gen_add_i32(addr, addr, tmp);
9225 tcg_temp_free_i32(tmp);
9226 tmp = tcg_temp_new_i32();
9227 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9229 tcg_temp_free_i32(tmp);
9230 tmp = tcg_temp_new_i32();
9231 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9233 tcg_temp_free_i32(addr);
9234 tcg_gen_shli_i32(tmp, tmp, 1);
9235 tcg_gen_addi_i32(tmp, tmp, s->pc);
9236 store_reg(s, 15, tmp);
9238 int op2 = (insn >> 6) & 0x3;
9239 op = (insn >> 4) & 0x3;
9244 /* Load/store exclusive byte/halfword/doubleword */
9251 /* Load-acquire/store-release */
9257 /* Load-acquire/store-release exclusive */
9261 addr = tcg_temp_local_new_i32();
9262 load_reg_var(s, addr, rn);
9264 if (insn & (1 << 20)) {
9265 tmp = tcg_temp_new_i32();
9268 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9271 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9274 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9279 store_reg(s, rs, tmp);
9281 tmp = load_reg(s, rs);
9284 gen_aa32_st8(tmp, addr, get_mem_index(s));
9287 gen_aa32_st16(tmp, addr, get_mem_index(s));
9290 gen_aa32_st32(tmp, addr, get_mem_index(s));
9295 tcg_temp_free_i32(tmp);
9297 } else if (insn & (1 << 20)) {
9298 gen_load_exclusive(s, rs, rd, addr, op);
9300 gen_store_exclusive(s, rm, rs, rd, addr, op);
9302 tcg_temp_free_i32(addr);
9305 /* Load/store multiple, RFE, SRS. */
9306 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9307 /* RFE, SRS: not available in user mode or on M profile */
9308 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9311 if (insn & (1 << 20)) {
9313 addr = load_reg(s, rn);
9314 if ((insn & (1 << 24)) == 0)
9315 tcg_gen_addi_i32(addr, addr, -8);
9316 /* Load PC into tmp and CPSR into tmp2. */
9317 tmp = tcg_temp_new_i32();
9318 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9319 tcg_gen_addi_i32(addr, addr, 4);
9320 tmp2 = tcg_temp_new_i32();
9321 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9322 if (insn & (1 << 21)) {
9323 /* Base writeback. */
9324 if (insn & (1 << 24)) {
9325 tcg_gen_addi_i32(addr, addr, 4);
9327 tcg_gen_addi_i32(addr, addr, -4);
9329 store_reg(s, rn, addr);
9331 tcg_temp_free_i32(addr);
9333 gen_rfe(s, tmp, tmp2);
9336 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9340 int i, loaded_base = 0;
9341 TCGv_i32 loaded_var;
9342 /* Load/store multiple. */
9343 addr = load_reg(s, rn);
9345 for (i = 0; i < 16; i++) {
9346 if (insn & (1 << i))
9349 if (insn & (1 << 24)) {
9350 tcg_gen_addi_i32(addr, addr, -offset);
9353 TCGV_UNUSED_I32(loaded_var);
9354 for (i = 0; i < 16; i++) {
9355 if ((insn & (1 << i)) == 0)
9357 if (insn & (1 << 20)) {
9359 tmp = tcg_temp_new_i32();
9360 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9363 } else if (i == rn) {
9367 store_reg(s, i, tmp);
9371 tmp = load_reg(s, i);
9372 gen_aa32_st32(tmp, addr, get_mem_index(s));
9373 tcg_temp_free_i32(tmp);
9375 tcg_gen_addi_i32(addr, addr, 4);
9378 store_reg(s, rn, loaded_var);
9380 if (insn & (1 << 21)) {
9381 /* Base register writeback. */
9382 if (insn & (1 << 24)) {
9383 tcg_gen_addi_i32(addr, addr, -offset);
9385 /* Fault if writeback register is in register list. */
9386 if (insn & (1 << rn))
9388 store_reg(s, rn, addr);
9390 tcg_temp_free_i32(addr);
9397 op = (insn >> 21) & 0xf;
9399 /* Halfword pack. */
9400 tmp = load_reg(s, rn);
9401 tmp2 = load_reg(s, rm);
9402 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9403 if (insn & (1 << 5)) {
9407 tcg_gen_sari_i32(tmp2, tmp2, shift);
9408 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9409 tcg_gen_ext16u_i32(tmp2, tmp2);
9413 tcg_gen_shli_i32(tmp2, tmp2, shift);
9414 tcg_gen_ext16u_i32(tmp, tmp);
9415 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9417 tcg_gen_or_i32(tmp, tmp, tmp2);
9418 tcg_temp_free_i32(tmp2);
9419 store_reg(s, rd, tmp);
9421 /* Data processing register constant shift. */
9423 tmp = tcg_temp_new_i32();
9424 tcg_gen_movi_i32(tmp, 0);
9426 tmp = load_reg(s, rn);
9428 tmp2 = load_reg(s, rm);
9430 shiftop = (insn >> 4) & 3;
9431 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9432 conds = (insn & (1 << 20)) != 0;
9433 logic_cc = (conds && thumb2_logic_op(op));
9434 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9435 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9437 tcg_temp_free_i32(tmp2);
9439 store_reg(s, rd, tmp);
9441 tcg_temp_free_i32(tmp);
9445 case 13: /* Misc data processing. */
9446 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9447 if (op < 4 && (insn & 0xf000) != 0xf000)
9450 case 0: /* Register controlled shift. */
9451 tmp = load_reg(s, rn);
9452 tmp2 = load_reg(s, rm);
9453 if ((insn & 0x70) != 0)
9455 op = (insn >> 21) & 3;
9456 logic_cc = (insn & (1 << 20)) != 0;
9457 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9460 store_reg_bx(s, rd, tmp);
9462 case 1: /* Sign/zero extend. */
9463 tmp = load_reg(s, rm);
9464 shift = (insn >> 4) & 3;
9465 /* ??? In many cases it's not necessary to do a
9466 rotate; a shift is sufficient. */
9468 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9469 op = (insn >> 20) & 7;
9471 case 0: gen_sxth(tmp); break;
9472 case 1: gen_uxth(tmp); break;
9473 case 2: gen_sxtb16(tmp); break;
9474 case 3: gen_uxtb16(tmp); break;
9475 case 4: gen_sxtb(tmp); break;
9476 case 5: gen_uxtb(tmp); break;
9477 default: goto illegal_op;
9480 tmp2 = load_reg(s, rn);
9481 if ((op >> 1) == 1) {
9482 gen_add16(tmp, tmp2);
9484 tcg_gen_add_i32(tmp, tmp, tmp2);
9485 tcg_temp_free_i32(tmp2);
9488 store_reg(s, rd, tmp);
9490 case 2: /* SIMD add/subtract. */
9491 op = (insn >> 20) & 7;
9492 shift = (insn >> 4) & 7;
9493 if ((op & 3) == 3 || (shift & 3) == 3)
9495 tmp = load_reg(s, rn);
9496 tmp2 = load_reg(s, rm);
9497 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9498 tcg_temp_free_i32(tmp2);
9499 store_reg(s, rd, tmp);
9501 case 3: /* Other data processing. */
9502 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9504 /* Saturating add/subtract. */
9505 tmp = load_reg(s, rn);
9506 tmp2 = load_reg(s, rm);
9508 gen_helper_double_saturate(tmp, cpu_env, tmp);
9510 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9512 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9513 tcg_temp_free_i32(tmp2);
9515 tmp = load_reg(s, rn);
9517 case 0x0a: /* rbit */
9518 gen_helper_rbit(tmp, tmp);
9520 case 0x08: /* rev */
9521 tcg_gen_bswap32_i32(tmp, tmp);
9523 case 0x09: /* rev16 */
9526 case 0x0b: /* revsh */
9529 case 0x10: /* sel */
9530 tmp2 = load_reg(s, rm);
9531 tmp3 = tcg_temp_new_i32();
9532 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9533 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9534 tcg_temp_free_i32(tmp3);
9535 tcg_temp_free_i32(tmp2);
9537 case 0x18: /* clz */
9538 gen_helper_clz(tmp, tmp);
9548 uint32_t sz = op & 0x3;
9549 uint32_t c = op & 0x8;
9551 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
9555 tmp2 = load_reg(s, rm);
9557 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9558 } else if (sz == 1) {
9559 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9561 tmp3 = tcg_const_i32(1 << sz);
9563 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9565 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9567 tcg_temp_free_i32(tmp2);
9568 tcg_temp_free_i32(tmp3);
9575 store_reg(s, rd, tmp);
9577 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9578 op = (insn >> 4) & 0xf;
9579 tmp = load_reg(s, rn);
9580 tmp2 = load_reg(s, rm);
9581 switch ((insn >> 20) & 7) {
9582 case 0: /* 32 x 32 -> 32 */
9583 tcg_gen_mul_i32(tmp, tmp, tmp2);
9584 tcg_temp_free_i32(tmp2);
9586 tmp2 = load_reg(s, rs);
9588 tcg_gen_sub_i32(tmp, tmp2, tmp);
9590 tcg_gen_add_i32(tmp, tmp, tmp2);
9591 tcg_temp_free_i32(tmp2);
9594 case 1: /* 16 x 16 -> 32 */
9595 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9596 tcg_temp_free_i32(tmp2);
9598 tmp2 = load_reg(s, rs);
9599 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9600 tcg_temp_free_i32(tmp2);
9603 case 2: /* Dual multiply add. */
9604 case 4: /* Dual multiply subtract. */
9606 gen_swap_half(tmp2);
9607 gen_smul_dual(tmp, tmp2);
9608 if (insn & (1 << 22)) {
9609 /* This subtraction cannot overflow. */
9610 tcg_gen_sub_i32(tmp, tmp, tmp2);
9612 /* This addition cannot overflow 32 bits;
9613 * however it may overflow when considered as a signed
9614 * operation, in which case we must set the Q flag.
9616 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9618 tcg_temp_free_i32(tmp2);
9621 tmp2 = load_reg(s, rs);
9622 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9623 tcg_temp_free_i32(tmp2);
9626 case 3: /* 32 * 16 -> 32msb */
9628 tcg_gen_sari_i32(tmp2, tmp2, 16);
9631 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9632 tcg_gen_shri_i64(tmp64, tmp64, 16);
9633 tmp = tcg_temp_new_i32();
9634 tcg_gen_trunc_i64_i32(tmp, tmp64);
9635 tcg_temp_free_i64(tmp64);
9638 tmp2 = load_reg(s, rs);
9639 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9640 tcg_temp_free_i32(tmp2);
9643 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9644 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9646 tmp = load_reg(s, rs);
9647 if (insn & (1 << 20)) {
9648 tmp64 = gen_addq_msw(tmp64, tmp);
9650 tmp64 = gen_subq_msw(tmp64, tmp);
9653 if (insn & (1 << 4)) {
9654 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9656 tcg_gen_shri_i64(tmp64, tmp64, 32);
9657 tmp = tcg_temp_new_i32();
9658 tcg_gen_trunc_i64_i32(tmp, tmp64);
9659 tcg_temp_free_i64(tmp64);
9661 case 7: /* Unsigned sum of absolute differences. */
9662 gen_helper_usad8(tmp, tmp, tmp2);
9663 tcg_temp_free_i32(tmp2);
9665 tmp2 = load_reg(s, rs);
9666 tcg_gen_add_i32(tmp, tmp, tmp2);
9667 tcg_temp_free_i32(tmp2);
9671 store_reg(s, rd, tmp);
9673 case 6: case 7: /* 64-bit multiply, Divide. */
9674 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
9675 tmp = load_reg(s, rn);
9676 tmp2 = load_reg(s, rm);
9677 if ((op & 0x50) == 0x10) {
9679 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9683 gen_helper_udiv(tmp, tmp, tmp2);
9685 gen_helper_sdiv(tmp, tmp, tmp2);
9686 tcg_temp_free_i32(tmp2);
9687 store_reg(s, rd, tmp);
9688 } else if ((op & 0xe) == 0xc) {
9689 /* Dual multiply accumulate long. */
9691 gen_swap_half(tmp2);
9692 gen_smul_dual(tmp, tmp2);
9694 tcg_gen_sub_i32(tmp, tmp, tmp2);
9696 tcg_gen_add_i32(tmp, tmp, tmp2);
9698 tcg_temp_free_i32(tmp2);
9700 tmp64 = tcg_temp_new_i64();
9701 tcg_gen_ext_i32_i64(tmp64, tmp);
9702 tcg_temp_free_i32(tmp);
9703 gen_addq(s, tmp64, rs, rd);
9704 gen_storeq_reg(s, rs, rd, tmp64);
9705 tcg_temp_free_i64(tmp64);
9708 /* Unsigned 64-bit multiply */
9709 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9713 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9714 tcg_temp_free_i32(tmp2);
9715 tmp64 = tcg_temp_new_i64();
9716 tcg_gen_ext_i32_i64(tmp64, tmp);
9717 tcg_temp_free_i32(tmp);
9719 /* Signed 64-bit multiply */
9720 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9725 gen_addq_lo(s, tmp64, rs);
9726 gen_addq_lo(s, tmp64, rd);
9727 } else if (op & 0x40) {
9728 /* 64-bit accumulate. */
9729 gen_addq(s, tmp64, rs, rd);
9731 gen_storeq_reg(s, rs, rd, tmp64);
9732 tcg_temp_free_i64(tmp64);
9737 case 6: case 7: case 14: case 15:
9739 if (((insn >> 24) & 3) == 3) {
9740 /* Translate into the equivalent ARM encoding. */
9741 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9742 if (disas_neon_data_insn(s, insn)) {
9745 } else if (((insn >> 8) & 0xe) == 10) {
9746 if (disas_vfp_insn(s, insn)) {
9750 if (insn & (1 << 28))
9752 if (disas_coproc_insn(s, insn)) {
9757 case 8: case 9: case 10: case 11:
9758 if (insn & (1 << 15)) {
9759 /* Branches, misc control. */
9760 if (insn & 0x5000) {
9761 /* Unconditional branch. */
9762 /* signextend(hw1[10:0]) -> offset[:12]. */
9763 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9764 /* hw1[10:0] -> offset[11:1]. */
9765 offset |= (insn & 0x7ff) << 1;
9766 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9767 offset[24:22] already have the same value because of the
9768 sign extension above. */
9769 offset ^= ((~insn) & (1 << 13)) << 10;
9770 offset ^= ((~insn) & (1 << 11)) << 11;
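/* Equivalently, per the T32 B/BL encoding: offset[23] = NOT(J1 XOR S)
   and offset[22] = NOT(J2 XOR S), with J1 = hw2[13], J2 = hw2[11] and
   S the sign bit already replicated into offset[24:22] above. */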
9772 if (insn & (1 << 14)) {
9773 /* Branch and link. */
9774 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
9778 if (insn & (1 << 12)) {
9783 offset &= ~(uint32_t)2;
9784 /* thumb2 bx, no need to check */
9785 gen_bx_im(s, offset);
9787 } else if (((insn >> 23) & 7) == 7) {
9789 if (insn & (1 << 13))
9792 if (insn & (1 << 26)) {
9793 if (!(insn & (1 << 20))) {
9794 /* Hypervisor call (v7) */
9795 int imm16 = extract32(insn, 16, 4) << 12
9796 | extract32(insn, 0, 12);
9803 /* Secure monitor call (v6+) */
9811 op = (insn >> 20) & 7;
9813 case 0: /* msr cpsr. */
9814 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9815 tmp = load_reg(s, rn);
9816 addr = tcg_const_i32(insn & 0xff);
9817 gen_helper_v7m_msr(cpu_env, addr, tmp);
9818 tcg_temp_free_i32(addr);
9819 tcg_temp_free_i32(tmp);
9824 case 1: /* msr spsr. */
9825 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9828 tmp = load_reg(s, rn);
9830 msr_mask(s, (insn >> 8) & 0xf, op == 1),
9834 case 2: /* cps, nop-hint. */
9835 if (((insn >> 8) & 7) == 0) {
9836 gen_nop_hint(s, insn & 0xff);
9838 /* Implemented as NOP in user mode. */
9843 if (insn & (1 << 10)) {
9844 if (insn & (1 << 7))
9846 if (insn & (1 << 6))
9848 if (insn & (1 << 5))
9850 if (insn & (1 << 9))
9851 imm = CPSR_A | CPSR_I | CPSR_F;
9853 if (insn & (1 << 8)) {
9855 imm |= (insn & 0x1f);
9858 gen_set_psr_im(s, offset, 0, imm);
9861 case 3: /* Special control operations. */
9863 op = (insn >> 4) & 0xf;
9871 /* These execute as NOPs. */
9878 /* Trivial implementation equivalent to bx. */
9879 tmp = load_reg(s, rn);
9882 case 5: /* Exception return. */
9886 if (rn != 14 || rd != 15) {
9889 tmp = load_reg(s, rn);
9890 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9891 gen_exception_return(s, tmp);
9893 case 6: /* mrs cpsr. */
9894 tmp = tcg_temp_new_i32();
9895 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9896 addr = tcg_const_i32(insn & 0xff);
9897 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9898 tcg_temp_free_i32(addr);
9900 gen_helper_cpsr_read(tmp, cpu_env);
9902 store_reg(s, rd, tmp);
9904 case 7: /* mrs spsr. */
9905 /* Not accessible in user mode. */
9906 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9909 tmp = load_cpu_field(spsr);
9910 store_reg(s, rd, tmp);
9915 /* Conditional branch. */
9916 op = (insn >> 22) & 0xf;
9917 /* Generate a conditional jump to the next instruction. */
9918 s->condlabel = gen_new_label();
9919 arm_gen_test_cc(op ^ 1, s->condlabel);
9922 /* offset[11:1] = insn[10:0] */
9923 offset = (insn & 0x7ff) << 1;
9924 /* offset[17:12] = insn[21:16]. */
9925 offset |= (insn & 0x003f0000) >> 4;
9926 /* offset[31:20] = insn[26]. */
9927 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9928 /* offset[18] = insn[13]. */
9929 offset |= (insn & (1 << 13)) << 5;
9930 /* offset[19] = insn[11]. */
9931 offset |= (insn & (1 << 11)) << 8;
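/* The assembled offset is a signed, halfword-aligned value whose sign
   comes from insn[26], giving conditional branches a range of roughly
   +/-1MB. */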
9933 /* jump to the offset */
9934 gen_jmp(s, s->pc + offset);
9937 /* Data processing immediate. */
9938 if (insn & (1 << 25)) {
9939 if (insn & (1 << 24)) {
9940 if (insn & (1 << 20))
9942 /* Bitfield/Saturate. */
9943 op = (insn >> 21) & 7;
9945 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9947 tmp = tcg_temp_new_i32();
9948 tcg_gen_movi_i32(tmp, 0);
9950 tmp = load_reg(s, rn);
9953 case 2: /* Signed bitfield extract. */
9955 if (shift + imm > 32)
9958 gen_sbfx(tmp, shift, imm);
9960 case 6: /* Unsigned bitfield extract. */
9962 if (shift + imm > 32)
9965 gen_ubfx(tmp, shift, (1u << imm) - 1);
9967 case 3: /* Bitfield insert/clear. */
9970 imm = imm + 1 - shift;
9972 tmp2 = load_reg(s, rd);
9973 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
9974 tcg_temp_free_i32(tmp2);
9979 default: /* Saturate. */
9982 tcg_gen_sari_i32(tmp, tmp, shift);
9984 tcg_gen_shli_i32(tmp, tmp, shift);
9986 tmp2 = tcg_const_i32(imm);
9989 if ((op & 1) && shift == 0)
9990 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9992 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
9995 if ((op & 1) && shift == 0)
9996 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9998 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10000 tcg_temp_free_i32(tmp2);
10003 store_reg(s, rd, tmp);
10005 imm = ((insn & 0x04000000) >> 15)
10006 | ((insn & 0x7000) >> 4) | (insn & 0xff);
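/* Assemble i:imm3:imm8 (insn[26], insn[14:12], insn[7:0]) into a
   12-bit immediate; the 16-bit form below adds imm4 from insn[19:16]. */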
10007 if (insn & (1 << 22)) {
10008 /* 16-bit immediate. */
10009 imm |= (insn >> 4) & 0xf000;
10010 if (insn & (1 << 23)) {
10012 tmp = load_reg(s, rd);
10013 tcg_gen_ext16u_i32(tmp, tmp);
10014 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10017 tmp = tcg_temp_new_i32();
10018 tcg_gen_movi_i32(tmp, imm);
10021 /* Add/sub 12-bit immediate. */
10023 offset = s->pc & ~(uint32_t)3;
10024 if (insn & (1 << 23))
10028 tmp = tcg_temp_new_i32();
10029 tcg_gen_movi_i32(tmp, offset);
10031 tmp = load_reg(s, rn);
10032 if (insn & (1 << 23))
10033 tcg_gen_subi_i32(tmp, tmp, imm);
10035 tcg_gen_addi_i32(tmp, tmp, imm);
10038 store_reg(s, rd, tmp);
10041 int shifter_out = 0;
10042 /* modified 12-bit immediate. */
10043 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10044 imm = (insn & 0xff);
10047 /* Nothing to do. */
10049 case 1: /* 00XY00XY */
10052 case 2: /* XY00XY00 */
10056 case 3: /* XYXYXYXY */
10060 default: /* Rotated constant. */
10061 shift = (shift << 1) | (imm >> 7);
10063 imm = imm << (32 - shift);
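/* For example, imm8 == 0xAB expands to 0x00AB00AB, 0xAB00AB00 or
   0xABABABAB for cases 1-3; the default case is an 8-bit pattern
   rotated right by the 5-bit count now held in 'shift'. */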
10067 tmp2 = tcg_temp_new_i32();
10068 tcg_gen_movi_i32(tmp2, imm);
10069 rn = (insn >> 16) & 0xf;
10071 tmp = tcg_temp_new_i32();
10072 tcg_gen_movi_i32(tmp, 0);
10074 tmp = load_reg(s, rn);
10076 op = (insn >> 21) & 0xf;
10077 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10078 shifter_out, tmp, tmp2))
10080 tcg_temp_free_i32(tmp2);
10081 rd = (insn >> 8) & 0xf;
10083 store_reg(s, rd, tmp);
10085 tcg_temp_free_i32(tmp);
10090 case 12: /* Load/store single data item. */
10095 if ((insn & 0x01100000) == 0x01000000) {
10096 if (disas_neon_ls_insn(s, insn)) {
10101 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10103 if (!(insn & (1 << 20))) {
10107 /* Byte or halfword load space with dest == r15 : memory hints.
10108 * Catch them early so we don't emit pointless addressing code.
10109 * This space is a mix of:
10110 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10111 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10113 * unallocated hints, which must be treated as NOPs
10114 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10115 * which is easiest for the decoding logic
10116 * Some space which must UNDEF
10118 int op1 = (insn >> 23) & 3;
10119 int op2 = (insn >> 6) & 0x3f;
10124 /* UNPREDICTABLE, unallocated hint or
10125 * PLD/PLDW/PLI (literal)
10130 return 0; /* PLD/PLDW/PLI or unallocated hint */
10132 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10133 return 0; /* PLD/PLDW/PLI or unallocated hint */
10135 /* UNDEF space, or an UNPREDICTABLE */
10139 memidx = get_mem_index(s);
10141 addr = tcg_temp_new_i32();
10143 /* s->pc has already been incremented by 4. */
10144 imm = s->pc & 0xfffffffc;
10145 if (insn & (1 << 23))
10146 imm += insn & 0xfff;
10148 imm -= insn & 0xfff;
10149 tcg_gen_movi_i32(addr, imm);
10151 addr = load_reg(s, rn);
10152 if (insn & (1 << 23)) {
10153 /* Positive offset. */
10154 imm = insn & 0xfff;
10155 tcg_gen_addi_i32(addr, addr, imm);
10158 switch ((insn >> 8) & 0xf) {
10159 case 0x0: /* Shifted Register. */
10160 shift = (insn >> 4) & 0xf;
10162 tcg_temp_free_i32(addr);
10165 tmp = load_reg(s, rm);
10167 tcg_gen_shli_i32(tmp, tmp, shift);
10168 tcg_gen_add_i32(addr, addr, tmp);
10169 tcg_temp_free_i32(tmp);
10171 case 0xc: /* Negative offset. */
10172 tcg_gen_addi_i32(addr, addr, -imm);
10174 case 0xe: /* User privilege. */
10175 tcg_gen_addi_i32(addr, addr, imm);
10176 memidx = MMU_USER_IDX;
10178 case 0x9: /* Post-decrement. */
10180 /* Fall through. */
10181 case 0xb: /* Post-increment. */
10185 case 0xd: /* Pre-decrement. */
10187 /* Fall through. */
10188 case 0xf: /* Pre-increment. */
10189 tcg_gen_addi_i32(addr, addr, imm);
10193 tcg_temp_free_i32(addr);
10198 if (insn & (1 << 20)) {
10200 tmp = tcg_temp_new_i32();
10203 gen_aa32_ld8u(tmp, addr, memidx);
10206 gen_aa32_ld8s(tmp, addr, memidx);
10209 gen_aa32_ld16u(tmp, addr, memidx);
10212 gen_aa32_ld16s(tmp, addr, memidx);
10215 gen_aa32_ld32u(tmp, addr, memidx);
10218 tcg_temp_free_i32(tmp);
10219 tcg_temp_free_i32(addr);
10225 store_reg(s, rs, tmp);
10229 tmp = load_reg(s, rs);
10232 gen_aa32_st8(tmp, addr, memidx);
10235 gen_aa32_st16(tmp, addr, memidx);
10238 gen_aa32_st32(tmp, addr, memidx);
10241 tcg_temp_free_i32(tmp);
10242 tcg_temp_free_i32(addr);
10245 tcg_temp_free_i32(tmp);
10248 tcg_gen_addi_i32(addr, addr, imm);
10250 store_reg(s, rn, addr);
10252 tcg_temp_free_i32(addr);
10264 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
10266 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10273 if (s->condexec_mask) {
10274 cond = s->condexec_cond;
10275 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10276 s->condlabel = gen_new_label();
10277 arm_gen_test_cc(cond ^ 1, s->condlabel);
10282 insn = arm_lduw_code(env, s->pc, s->bswap_code);
10285 switch (insn >> 12) {
10289 op = (insn >> 11) & 3;
10292 rn = (insn >> 3) & 7;
10293 tmp = load_reg(s, rn);
10294 if (insn & (1 << 10)) {
10296 tmp2 = tcg_temp_new_i32();
10297 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10300 rm = (insn >> 6) & 7;
10301 tmp2 = load_reg(s, rm);
10303 if (insn & (1 << 9)) {
10304 if (s->condexec_mask)
10305 tcg_gen_sub_i32(tmp, tmp, tmp2);
10307 gen_sub_CC(tmp, tmp, tmp2);
10309 if (s->condexec_mask)
10310 tcg_gen_add_i32(tmp, tmp, tmp2);
10312 gen_add_CC(tmp, tmp, tmp2);
10314 tcg_temp_free_i32(tmp2);
10315 store_reg(s, rd, tmp);
10317 /* shift immediate */
10318 rm = (insn >> 3) & 7;
10319 shift = (insn >> 6) & 0x1f;
10320 tmp = load_reg(s, rm);
10321 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10322 if (!s->condexec_mask)
10324 store_reg(s, rd, tmp);
10328 /* arithmetic large immediate */
10329 op = (insn >> 11) & 3;
10330 rd = (insn >> 8) & 0x7;
10331 if (op == 0) { /* mov */
10332 tmp = tcg_temp_new_i32();
10333 tcg_gen_movi_i32(tmp, insn & 0xff);
10334 if (!s->condexec_mask)
10336 store_reg(s, rd, tmp);
10338 tmp = load_reg(s, rd);
10339 tmp2 = tcg_temp_new_i32();
10340 tcg_gen_movi_i32(tmp2, insn & 0xff);
10343 gen_sub_CC(tmp, tmp, tmp2);
10344 tcg_temp_free_i32(tmp);
10345 tcg_temp_free_i32(tmp2);
10348 if (s->condexec_mask)
10349 tcg_gen_add_i32(tmp, tmp, tmp2);
10351 gen_add_CC(tmp, tmp, tmp2);
10352 tcg_temp_free_i32(tmp2);
10353 store_reg(s, rd, tmp);
10356 if (s->condexec_mask)
10357 tcg_gen_sub_i32(tmp, tmp, tmp2);
10359 gen_sub_CC(tmp, tmp, tmp2);
10360 tcg_temp_free_i32(tmp2);
10361 store_reg(s, rd, tmp);
10367 if (insn & (1 << 11)) {
10368 rd = (insn >> 8) & 7;
10369 /* load pc-relative. Bit 1 of PC is ignored. */
10370 val = s->pc + 2 + ((insn & 0xff) * 4);
10371 val &= ~(uint32_t)2;
10372 addr = tcg_temp_new_i32();
10373 tcg_gen_movi_i32(addr, val);
10374 tmp = tcg_temp_new_i32();
10375 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10376 tcg_temp_free_i32(addr);
10377 store_reg(s, rd, tmp);
10380 if (insn & (1 << 10)) {
10381 /* data processing extended or blx */
10382 rd = (insn & 7) | ((insn >> 4) & 8);
10383 rm = (insn >> 3) & 0xf;
10384 op = (insn >> 8) & 3;
10387 tmp = load_reg(s, rd);
10388 tmp2 = load_reg(s, rm);
10389 tcg_gen_add_i32(tmp, tmp, tmp2);
10390 tcg_temp_free_i32(tmp2);
10391 store_reg(s, rd, tmp);
10394 tmp = load_reg(s, rd);
10395 tmp2 = load_reg(s, rm);
10396 gen_sub_CC(tmp, tmp, tmp2);
10397 tcg_temp_free_i32(tmp2);
10398 tcg_temp_free_i32(tmp);
10400 case 2: /* mov/cpy */
10401 tmp = load_reg(s, rm);
10402 store_reg(s, rd, tmp);
10404 case 3: /* branch [and link] exchange thumb register */
10405 tmp = load_reg(s, rm);
10406 if (insn & (1 << 7)) {
10408 val = (uint32_t)s->pc | 1;
10409 tmp2 = tcg_temp_new_i32();
10410 tcg_gen_movi_i32(tmp2, val);
10411 store_reg(s, 14, tmp2);
10413 /* already thumb, no need to check */
10420 /* data processing register */
10422 rm = (insn >> 3) & 7;
10423 op = (insn >> 6) & 0xf;
10424 if (op == 2 || op == 3 || op == 4 || op == 7) {
10425 /* the shift/rotate ops want the operands backwards */
10434 if (op == 9) { /* neg */
10435 tmp = tcg_temp_new_i32();
10436 tcg_gen_movi_i32(tmp, 0);
10437 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10438 tmp = load_reg(s, rd);
10440 TCGV_UNUSED_I32(tmp);
10443 tmp2 = load_reg(s, rm);
10445 case 0x0: /* and */
10446 tcg_gen_and_i32(tmp, tmp, tmp2);
10447 if (!s->condexec_mask)
10450 case 0x1: /* eor */
10451 tcg_gen_xor_i32(tmp, tmp, tmp2);
10452 if (!s->condexec_mask)
10455 case 0x2: /* lsl */
10456 if (s->condexec_mask) {
10457 gen_shl(tmp2, tmp2, tmp);
10459 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
10460 gen_logic_CC(tmp2);
10463 case 0x3: /* lsr */
10464 if (s->condexec_mask) {
10465 gen_shr(tmp2, tmp2, tmp);
10467 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
10468 gen_logic_CC(tmp2);
10471 case 0x4: /* asr */
10472 if (s->condexec_mask) {
10473 gen_sar(tmp2, tmp2, tmp);
10475 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
10476 gen_logic_CC(tmp2);
10479 case 0x5: /* adc */
10480 if (s->condexec_mask) {
10481 gen_adc(tmp, tmp2);
10483 gen_adc_CC(tmp, tmp, tmp2);
10486 case 0x6: /* sbc */
10487 if (s->condexec_mask) {
10488 gen_sub_carry(tmp, tmp, tmp2);
10490 gen_sbc_CC(tmp, tmp, tmp2);
10493 case 0x7: /* ror */
10494 if (s->condexec_mask) {
10495 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10496 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
10498 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
10499 gen_logic_CC(tmp2);
10502 case 0x8: /* tst */
10503 tcg_gen_and_i32(tmp, tmp, tmp2);
10507 case 0x9: /* neg */
10508 if (s->condexec_mask)
10509 tcg_gen_neg_i32(tmp, tmp2);
10511 gen_sub_CC(tmp, tmp, tmp2);
10513 case 0xa: /* cmp */
10514 gen_sub_CC(tmp, tmp, tmp2);
10517 case 0xb: /* cmn */
10518 gen_add_CC(tmp, tmp, tmp2);
10521 case 0xc: /* orr */
10522 tcg_gen_or_i32(tmp, tmp, tmp2);
10523 if (!s->condexec_mask)
10526 case 0xd: /* mul */
10527 tcg_gen_mul_i32(tmp, tmp, tmp2);
10528 if (!s->condexec_mask)
10531 case 0xe: /* bic */
10532 tcg_gen_andc_i32(tmp, tmp, tmp2);
10533 if (!s->condexec_mask)
10536 case 0xf: /* mvn */
10537 tcg_gen_not_i32(tmp2, tmp2);
10538 if (!s->condexec_mask)
10539 gen_logic_CC(tmp2);
10546 store_reg(s, rm, tmp2);
10548 tcg_temp_free_i32(tmp);
10550 store_reg(s, rd, tmp);
10551 tcg_temp_free_i32(tmp2);
10554 tcg_temp_free_i32(tmp);
10555 tcg_temp_free_i32(tmp2);
10560 /* load/store register offset. */
10562 rn = (insn >> 3) & 7;
10563 rm = (insn >> 6) & 7;
10564 op = (insn >> 9) & 7;
10565 addr = load_reg(s, rn);
10566 tmp = load_reg(s, rm);
10567 tcg_gen_add_i32(addr, addr, tmp);
10568 tcg_temp_free_i32(tmp);
10570 if (op < 3) { /* store */
10571 tmp = load_reg(s, rd);
10573 tmp = tcg_temp_new_i32();
10578 gen_aa32_st32(tmp, addr, get_mem_index(s));
10581 gen_aa32_st16(tmp, addr, get_mem_index(s));
10584 gen_aa32_st8(tmp, addr, get_mem_index(s));
10586 case 3: /* ldrsb */
10587 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
10590 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10593 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10596 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10598 case 7: /* ldrsh */
10599 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
10602 if (op >= 3) { /* load */
10603 store_reg(s, rd, tmp);
10605 tcg_temp_free_i32(tmp);
10607 tcg_temp_free_i32(addr);
10611 /* load/store word immediate offset */
10613 rn = (insn >> 3) & 7;
10614 addr = load_reg(s, rn);
10615 val = (insn >> 4) & 0x7c;
10616 tcg_gen_addi_i32(addr, addr, val);
10618 if (insn & (1 << 11)) {
10620 tmp = tcg_temp_new_i32();
10621 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10622 store_reg(s, rd, tmp);
10625 tmp = load_reg(s, rd);
10626 gen_aa32_st32(tmp, addr, get_mem_index(s));
10627 tcg_temp_free_i32(tmp);
10629 tcg_temp_free_i32(addr);
10633 /* load/store byte immediate offset */
10635 rn = (insn >> 3) & 7;
10636 addr = load_reg(s, rn);
10637 val = (insn >> 6) & 0x1f;
10638 tcg_gen_addi_i32(addr, addr, val);
10640 if (insn & (1 << 11)) {
10642 tmp = tcg_temp_new_i32();
10643 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10644 store_reg(s, rd, tmp);
10647 tmp = load_reg(s, rd);
10648 gen_aa32_st8(tmp, addr, get_mem_index(s));
10649 tcg_temp_free_i32(tmp);
10651 tcg_temp_free_i32(addr);
10655 /* load/store halfword immediate offset */
10657 rn = (insn >> 3) & 7;
10658 addr = load_reg(s, rn);
10659 val = (insn >> 5) & 0x3e;
10660 tcg_gen_addi_i32(addr, addr, val);
10662 if (insn & (1 << 11)) {
10664 tmp = tcg_temp_new_i32();
10665 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10666 store_reg(s, rd, tmp);
10669 tmp = load_reg(s, rd);
10670 gen_aa32_st16(tmp, addr, get_mem_index(s));
10671 tcg_temp_free_i32(tmp);
10673 tcg_temp_free_i32(addr);
10677 /* load/store from stack */
10678 rd = (insn >> 8) & 7;
10679 addr = load_reg(s, 13);
10680 val = (insn & 0xff) * 4;
10681 tcg_gen_addi_i32(addr, addr, val);
10683 if (insn & (1 << 11)) {
10685 tmp = tcg_temp_new_i32();
10686 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10687 store_reg(s, rd, tmp);
10690 tmp = load_reg(s, rd);
10691 gen_aa32_st32(tmp, addr, get_mem_index(s));
10692 tcg_temp_free_i32(tmp);
10694 tcg_temp_free_i32(addr);
10698 /* add to high reg */
10699 rd = (insn >> 8) & 7;
10700 if (insn & (1 << 11)) {
10702 tmp = load_reg(s, 13);
10704 /* PC. bit 1 is ignored. */
10705 tmp = tcg_temp_new_i32();
10706 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
10708 val = (insn & 0xff) * 4;
10709 tcg_gen_addi_i32(tmp, tmp, val);
10710 store_reg(s, rd, tmp);
10715 op = (insn >> 8) & 0xf;
10718 /* adjust stack pointer */
10719 tmp = load_reg(s, 13);
10720 val = (insn & 0x7f) * 4;
10721 if (insn & (1 << 7))
10722 val = -(int32_t)val;
10723 tcg_gen_addi_i32(tmp, tmp, val);
10724 store_reg(s, 13, tmp);
10727 case 2: /* sign/zero extend. */
10730 rm = (insn >> 3) & 7;
10731 tmp = load_reg(s, rm);
10732 switch ((insn >> 6) & 3) {
10733 case 0: gen_sxth(tmp); break;
10734 case 1: gen_sxtb(tmp); break;
10735 case 2: gen_uxth(tmp); break;
10736 case 3: gen_uxtb(tmp); break;
10738 store_reg(s, rd, tmp);
10740 case 4: case 5: case 0xc: case 0xd:
10742 addr = load_reg(s, 13);
10743 if (insn & (1 << 8))
10747 for (i = 0; i < 8; i++) {
10748 if (insn & (1 << i))
10751 if ((insn & (1 << 11)) == 0) {
10752 tcg_gen_addi_i32(addr, addr, -offset);
10754 for (i = 0; i < 8; i++) {
10755 if (insn & (1 << i)) {
10756 if (insn & (1 << 11)) {
10758 tmp = tcg_temp_new_i32();
10759 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10760 store_reg(s, i, tmp);
10763 tmp = load_reg(s, i);
10764 gen_aa32_st32(tmp, addr, get_mem_index(s));
10765 tcg_temp_free_i32(tmp);
10767 /* advance to the next address. */
10768 tcg_gen_addi_i32(addr, addr, 4);
10771 TCGV_UNUSED_I32(tmp);
10772 if (insn & (1 << 8)) {
10773 if (insn & (1 << 11)) {
10775 tmp = tcg_temp_new_i32();
10776 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10777 /* don't set the pc until the rest of the instruction
10781 tmp = load_reg(s, 14);
10782 gen_aa32_st32(tmp, addr, get_mem_index(s));
10783 tcg_temp_free_i32(tmp);
10785 tcg_gen_addi_i32(addr, addr, 4);
10787 if ((insn & (1 << 11)) == 0) {
10788 tcg_gen_addi_i32(addr, addr, -offset);
10790 /* write back the new stack pointer */
10791 store_reg(s, 13, addr);
10792 /* set the new PC value */
10793 if ((insn & 0x0900) == 0x0900) {
10794 store_reg_from_load(s, 15, tmp);
10798 case 1: case 3: case 9: case 11: /* cbz/cbnz */
10800 tmp = load_reg(s, rm);
10801 s->condlabel = gen_new_label();
10803 if (insn & (1 << 11))
10804 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
10806 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
10807 tcg_temp_free_i32(tmp);
10808 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
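/* CB{N}Z: offset[5:1] = insn[7:3] and offset[6] = insn[9]; the offset
   is zero-extended, so these only ever branch forward. */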
10809 val = (uint32_t)s->pc + 2;
10814 case 15: /* IT, nop-hint. */
10815 if ((insn & 0xf) == 0) {
10816 gen_nop_hint(s, (insn >> 4) & 0xf);
10820 s->condexec_cond = (insn >> 4) & 0xe;
10821 s->condexec_mask = insn & 0x1f;
10822 /* No actual code generated for this insn, just setup state. */
10825 case 0xe: /* bkpt */
10827 int imm8 = extract32(insn, 0, 8);
10829 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
10833 case 0xa: /* rev */
10835 rn = (insn >> 3) & 0x7;
10837 tmp = load_reg(s, rn);
10838 switch ((insn >> 6) & 3) {
10839 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
10840 case 1: gen_rev16(tmp); break;
10841 case 3: gen_revsh(tmp); break;
10842 default: goto illegal_op;
10844 store_reg(s, rd, tmp);
10848 switch ((insn >> 5) & 7) {
10852 if (((insn >> 3) & 1) != s->bswap_code) {
10853 /* Dynamic endianness switching not implemented. */
10854 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
10864 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10865 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10868 addr = tcg_const_i32(19);
10869 gen_helper_v7m_msr(cpu_env, addr, tmp);
10870 tcg_temp_free_i32(addr);
10874 addr = tcg_const_i32(16);
10875 gen_helper_v7m_msr(cpu_env, addr, tmp);
10876 tcg_temp_free_i32(addr);
10878 tcg_temp_free_i32(tmp);
10881 if (insn & (1 << 4)) {
10882 shift = CPSR_A | CPSR_I | CPSR_F;
10886 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
10899 case 12:
10900 {
10901 /* load/store multiple */
10902 TCGv_i32 loaded_var;
10903 TCGV_UNUSED_I32(loaded_var);
10904 rn = (insn >> 8) & 0x7;
10905 addr = load_reg(s, rn);
10906 for (i = 0; i < 8; i++) {
10907 if (insn & (1 << i)) {
10908 if (insn & (1 << 11)) {
10909 /* load */
10910 tmp = tcg_temp_new_i32();
10911 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10912 if (i == rn) {
10913 loaded_var = tmp;
10914 } else {
10915 store_reg(s, i, tmp);
10916 }
10917 } else {
10918 /* store */
10919 tmp = load_reg(s, i);
10920 gen_aa32_st32(tmp, addr, get_mem_index(s));
10921 tcg_temp_free_i32(tmp);
10922 }
10923 /* advance to the next address */
10924 tcg_gen_addi_i32(addr, addr, 4);
10925 }
10926 }
10927 if ((insn & (1 << rn)) == 0) {
10928 /* base reg not in list: base register writeback */
10929 store_reg(s, rn, addr);
10930 } else {
10931 /* base reg in list: if load, complete it now */
10932 if (insn & (1 << 11)) {
10933 store_reg(s, rn, loaded_var);
10934 }
10935 tcg_temp_free_i32(addr);
10936 }
10937 break;
10938 }
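/* Example of the writeback rule implemented above: for "ldmia r1, {r0-r2}"
 * the base r1 is in the register list, so the word loaded for r1 is parked in
 * loaded_var, no incremented address is written back, and the loaded value is
 * stored to r1 only after the whole list has been transferred.
 */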
10939 case 13:
10940 /* conditional branch or swi */
10941 cond = (insn >> 8) & 0xf;
10942 if (cond == 0xe)
10943 goto undef;
10945 if (cond == 0xf) {
10946 /* swi */
10947 gen_set_pc_im(s, s->pc);
10948 s->svc_imm = extract32(insn, 0, 8);
10949 s->is_jmp = DISAS_SWI;
10950 break;
10951 }
10952 /* generate a conditional jump to next instruction */
10953 s->condlabel = gen_new_label();
10954 arm_gen_test_cc(cond ^ 1, s->condlabel);
10955 s->condjmp = 1;
10957 /* jump to the offset */
10958 val = (uint32_t)s->pc + 2;
10959 offset = ((int32_t)insn << 24) >> 24;
10960 val += offset << 1;
10961 gen_jmp(s, val);
10962 break;
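/* The branch target above is the sign-extended 8-bit immediate, doubled, and
 * added to the address of the branch plus 4 (s->pc has already moved past the
 * 16-bit insn and 2 more are added).  For instance an imm8 of 0xfe gives an
 * offset of -4, i.e. a conditional branch to itself.
 */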
10964 case 14:
10965 if (insn & (1 << 11)) {
10966 if (disas_thumb2_insn(env, s, insn))
10967 goto undef32;
10968 break;
10969 }
10970 /* unconditional branch */
10971 val = (uint32_t)s->pc;
10972 offset = ((int32_t)insn << 21) >> 21;
10973 val += (offset << 1) + 2;
10974 gen_jmp(s, val);
10975 break;
10977 case 15:
10978 if (disas_thumb2_insn(env, s, insn))
10979 goto undef32;
10980 break;
10981 }
10982 return;
10983 undef32:
10984 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
10985 return;
10986 illegal_op:
10987 undef:
10988 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
10989 }
10991 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10992 basic block 'tb'. If search_pc is TRUE, also generate PC
10993 information for each intermediate instruction. */
10994 static inline void gen_intermediate_code_internal(ARMCPU *cpu,
10995 TranslationBlock *tb,
10996 bool search_pc)
10997 {
10998 CPUState *cs = CPU(cpu);
10999 CPUARMState *env = &cpu->env;
11000 DisasContext dc1, *dc = &dc1;
11001 CPUBreakpoint *bp;
11002 uint16_t *gen_opc_end;
11003 int j, lj;
11004 target_ulong pc_start;
11005 target_ulong next_page_start;
11006 int num_insns;
11007 int max_insns;
11009 /* generate intermediate code */
11011 /* The A64 decoder has its own top level loop, because it doesn't need
11012 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11013 */
11014 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
11015 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
11016 return;
11017 }
11019 pc_start = tb->pc;
11021 dc->tb = tb;
11023 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
11025 dc->is_jmp = DISAS_NEXT;
11026 dc->pc = pc_start;
11027 dc->singlestep_enabled = cs->singlestep_enabled;
11028 dc->condjmp = 0;
11030 dc->aarch64 = 0;
11031 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
11032 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
11033 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11034 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
11035 #if !defined(CONFIG_USER_ONLY)
11036 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
11037 #endif
11038 dc->ns = ARM_TBFLAG_NS(tb->flags);
11039 dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
11040 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11041 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11042 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
11043 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
11044 dc->cp_regs = cpu->cp_regs;
11045 dc->current_el = arm_current_el(env);
11046 dc->features = env->features;
11048 /* Single step state. The code-generation logic here is:
11049 * SS_ACTIVE == 0:
11050 * generate code with no special handling for single-stepping (except
11051 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11052 * this happens anyway because those changes are all system register or
11053 * PSTATE writes).
11054 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11055 * emit code for one insn
11056 * emit code to clear PSTATE.SS
11057 * emit code to generate software step exception for completed step
11058 * end TB (as usual for having generated an exception)
11059 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11060 * emit code to generate a software step exception
11061 * end the TB
11062 */
11063 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11064 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11065 dc->is_ldex = false;
11066 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
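/* The three cases in the comment above map onto the loop below: the
 * active-pending state is caught before any insn is translated (the
 * assert(num_insns == 0) path), while the active-not-pending case is handled
 * after translation via gen_ss_advance()/gen_step_complete_exception().
 */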
11068 cpu_F0s = tcg_temp_new_i32();
11069 cpu_F1s = tcg_temp_new_i32();
11070 cpu_F0d = tcg_temp_new_i64();
11071 cpu_F1d = tcg_temp_new_i64();
11072 cpu_V0 = cpu_F0d;
11073 cpu_V1 = cpu_F1d;
11074 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11075 cpu_M0 = tcg_temp_new_i64();
11076 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
11077 lj = -1;
11078 num_insns = 0;
11079 max_insns = tb->cflags & CF_COUNT_MASK;
11080 if (max_insns == 0)
11081 max_insns = CF_COUNT_MASK;
11083 gen_tb_start();
11085 tcg_clear_temp_count();
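/* tcg_check_temp_count() inside the loop below pairs with this reset and
 * reports any TCG temporaries leaked by the translation of a single guest
 * instruction.
 */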
11087 /* A note on handling of the condexec (IT) bits:
11088 *
11089 * We want to avoid the overhead of having to write the updated condexec
11090 * bits back to the CPUARMState for every instruction in an IT block. So:
11091 * (1) if the condexec bits are not already zero then we write
11092 * zero back into the CPUARMState now. This avoids complications trying
11093 * to do it at the end of the block. (For example if we don't do this
11094 * it's hard to identify whether we can safely skip writing condexec
11095 * at the end of the TB, which we definitely want to do for the case
11096 * where a TB doesn't do anything with the IT state at all.)
11097 * (2) if we are going to leave the TB then we call gen_set_condexec()
11098 * which will write the correct value into CPUARMState if zero is wrong.
11099 * This is done both for leaving the TB at the end, and for leaving
11100 * it because of an exception we know will happen, which is done in
11101 * gen_exception_insn(). The latter is necessary because we need to
11102 * leave the TB with the PC/IT state just prior to execution of the
11103 * instruction which caused the exception.
11104 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11105 * then the CPUARMState will be wrong and we need to reset it.
11106 * This is handled in the same way as restoration of the
11107 * PC in these situations: we will be called again with search_pc=1
11108 * and generate a mapping of the condexec bits for each PC in
11109 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
11110 * this to restore the condexec bits.
11111 *
11112 * Note that there are no instructions which can read the condexec
11113 * bits, and none which can write non-static values to them, so
11114 * we don't need to care about whether CPUARMState is correct in the
11115 * middle of a TB.
11116 */
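/* Concretely, the saved form is the same packing used for the TB flags: the
 * condition goes in the high nibble and the 5-bit mask, minus its low bit, in
 * the low nibble ((cond << 4) | (mask >> 1), see gen_opc_condexec_bits[]
 * below), and the TB-start decode above reverses it with the << 1 and >> 4.
 */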
11118 /* Reset the conditional execution bits immediately. This avoids
11119 complications trying to do it at the end of the block. */
11120 if (dc->condexec_mask || dc->condexec_cond)
11121 {
11122 TCGv_i32 tmp = tcg_temp_new_i32();
11123 tcg_gen_movi_i32(tmp, 0);
11124 store_cpu_field(tmp, condexec_bits);
11125 }
11126 do {
11127 #ifdef CONFIG_USER_ONLY
11128 /* Intercept jump to the magic kernel page. */
11129 if (dc->pc >= 0xffff0000) {
11130 /* We always get here via a jump, so know we are not in a
11131 conditional execution block. */
11132 gen_exception_internal(EXCP_KERNEL_TRAP);
11133 dc->is_jmp = DISAS_UPDATE;
11134 break;
11135 }
11136 #else
11137 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
11138 /* We always get here via a jump, so know we are not in a
11139 conditional execution block. */
11140 gen_exception_internal(EXCP_EXCEPTION_EXIT);
11141 dc->is_jmp = DISAS_UPDATE;
11142 break;
11143 }
11144 #endif
11146 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
11147 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
11148 if (bp->pc == dc->pc) {
11149 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
11150 /* Advance PC so that clearing the breakpoint will
11151 invalidate this TB. */
11152 dc->pc += 2;
11153 goto done_generating;
11154 }
11155 }
11156 }
11157 if (search_pc) {
11158 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
11159 if (lj < j) {
11160 lj++;
11161 while (lj < j)
11162 tcg_ctx.gen_opc_instr_start[lj++] = 0;
11163 }
11164 tcg_ctx.gen_opc_pc[lj] = dc->pc;
11165 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
11166 tcg_ctx.gen_opc_instr_start[lj] = 1;
11167 tcg_ctx.gen_opc_icount[lj] = num_insns;
11168 }
11170 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
11171 gen_io_start();
11173 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
11174 tcg_gen_debug_insn_start(dc->pc);
11175 }
11177 if (dc->ss_active && !dc->pstate_ss) {
11178 /* Singlestep state is Active-pending.
11179 * If we're in this state at the start of a TB then either
11180 * a) we just took an exception to an EL which is being debugged
11181 * and this is the first insn in the exception handler
11182 * b) debug exceptions were masked and we just unmasked them
11183 * without changing EL (eg by clearing PSTATE.D)
11184 * In either case we're going to take a swstep exception in the
11185 * "did not step an insn" case, and so the syndrome ISV and EX
11186 * bits should be zero.
11187 */
11188 assert(num_insns == 0);
11189 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
11190 goto done_generating;
11191 }
11193 if (dc->thumb) {
11194 disas_thumb_insn(env, dc);
11195 if (dc->condexec_mask) {
11196 dc->condexec_cond = (dc->condexec_cond & 0xe)
11197 | ((dc->condexec_mask >> 4) & 1);
11198 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11199 if (dc->condexec_mask == 0) {
11200 dc->condexec_cond = 0;
11201 }
11202 }
11203 } else {
11204 unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
11205 dc->pc += 4;
11206 disas_arm_insn(dc, insn);
11207 }
11209 if (dc->condjmp && !dc->is_jmp) {
11210 gen_set_label(dc->condlabel);
11211 dc->condjmp = 0;
11212 }
11214 if (tcg_check_temp_count()) {
11215 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11216 dc->pc);
11217 }
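/* Worked example of the condexec advance above (illustrative, assuming the
 * usual T32 IT encoding where "ite eq" has firstcond 0x0 and mask 0xc): after
 * the IT insn's own pass through this code the state is cond = 0x0,
 * mask = 0b11000; the first conditional insn then shifts bit 4 into the
 * condition, leaving cond = 0x1 (NE) and mask = 0b10000 for the "else" insn,
 * and the next shift empties the mask and clears both fields.
 */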
11219 /* Translation stops when a conditional branch is encountered.
11220 * Otherwise the subsequent code could get translated several times.
11221 * Also stop translation when a page boundary is reached. This
11222 * ensures prefetch aborts occur at the right place. */
11223 num_insns ++;
11224 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
11225 !cs->singlestep_enabled &&
11226 !singlestep &&
11227 !dc->ss_active &&
11228 dc->pc < next_page_start &&
11229 num_insns < max_insns);
11231 if (tb->cflags & CF_LAST_IO) {
11232 if (dc->condjmp) {
11233 /* FIXME: This can theoretically happen with self-modifying
11234 code. */
11235 cpu_abort(cs, "IO on conditional branch instruction");
11236 }
11237 gen_io_end();
11238 }
11240 /* At this stage dc->condjmp will only be set when the skipped
11241 instruction was a conditional branch or trap, and the PC has
11242 already been written. */
11243 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
11244 /* Make sure the pc is updated, and raise a debug exception. */
11245 if (dc->condjmp) {
11246 gen_set_condexec(dc);
11247 if (dc->is_jmp == DISAS_SWI) {
11248 gen_ss_advance(dc);
11249 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
11250 } else if (dc->is_jmp == DISAS_HVC) {
11251 gen_ss_advance(dc);
11252 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
11253 } else if (dc->is_jmp == DISAS_SMC) {
11254 gen_ss_advance(dc);
11255 gen_exception(EXCP_SMC, syn_aa32_smc());
11256 } else if (dc->ss_active) {
11257 gen_step_complete_exception(dc);
11258 } else {
11259 gen_exception_internal(EXCP_DEBUG);
11260 }
11261 gen_set_label(dc->condlabel);
11262 }
11263 if (dc->condjmp || !dc->is_jmp) {
11264 gen_set_pc_im(dc, dc->pc);
11265 dc->condjmp = 0;
11266 }
11267 gen_set_condexec(dc);
11268 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
11269 gen_ss_advance(dc);
11270 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
11271 } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
11272 gen_ss_advance(dc);
11273 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
11274 } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
11275 gen_ss_advance(dc);
11276 gen_exception(EXCP_SMC, syn_aa32_smc());
11277 } else if (dc->ss_active) {
11278 gen_step_complete_exception(dc);
11279 } else {
11280 /* FIXME: Single stepping a WFI insn will not halt
11281 the CPU. */
11282 gen_exception_internal(EXCP_DEBUG);
11283 }
11284 } else {
11285 /* While branches must always occur at the end of an IT block,
11286 there are a few other things that can cause us to terminate
11287 the TB in the middle of an IT block:
11288 - Exception generating instructions (bkpt, swi, undefined).
11289 - Page boundaries.
11290 - Hardware watchpoints.
11291 Hardware breakpoints have already been handled and skip this code.
11292 */
11293 gen_set_condexec(dc);
11294 switch(dc->is_jmp) {
11295 case DISAS_NEXT:
11296 gen_goto_tb(dc, 1, dc->pc);
11297 break;
11298 default:
11299 case DISAS_JUMP:
11300 case DISAS_UPDATE:
11301 /* indicate that the hash table must be used to find the next TB */
11302 tcg_gen_exit_tb(0);
11303 break;
11304 case DISAS_TB_JUMP:
11305 /* nothing more to generate */
11306 break;
11307 case DISAS_WFI:
11308 gen_helper_wfi(cpu_env);
11309 break;
11310 case DISAS_WFE:
11311 gen_helper_wfe(cpu_env);
11312 break;
11313 case DISAS_SWI:
11314 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
11315 break;
11316 case DISAS_HVC:
11317 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
11318 break;
11319 case DISAS_SMC:
11320 gen_exception(EXCP_SMC, syn_aa32_smc());
11321 break;
11322 }
11323 if (dc->condjmp) {
11324 gen_set_label(dc->condlabel);
11325 gen_set_condexec(dc);
11326 gen_goto_tb(dc, 1, dc->pc);
11327 dc->condjmp = 0;
11328 }
11329 }
11331 done_generating:
11332 gen_tb_end(tb, num_insns);
11333 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
11335 #ifdef DEBUG_DISAS
11336 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
11337 qemu_log("----------------\n");
11338 qemu_log("IN: %s\n", lookup_symbol(pc_start));
11339 log_target_disas(env, pc_start, dc->pc - pc_start,
11340 dc->thumb | (dc->bswap_code << 1));
11341 qemu_log("\n");
11342 }
11343 #endif
11344 if (search_pc) {
11345 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
11346 lj++;
11347 while (lj <= j)
11348 tcg_ctx.gen_opc_instr_start[lj++] = 0;
11349 } else {
11350 tb->size = dc->pc - pc_start;
11351 tb->icount = num_insns;
11352 }
11353 }
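/* The wrappers below expose the two modes: gen_intermediate_code() runs a
 * normal translation, while gen_intermediate_code_pc() passes search_pc = true
 * so that the gen_opc_pc[]/gen_opc_condexec_bits[] tables are filled in for
 * restore_state_to_opc() at the bottom of this file.
 */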
11355 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
11356 {
11357 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
11358 }
11360 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
11361 {
11362 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
11363 }
11365 static const char *cpu_mode_names[16] = {
11366 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11367 "???", "???", "hyp", "und", "???", "???", "???", "sys"
11368 };
11370 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11371 int flags)
11372 {
11373 ARMCPU *cpu = ARM_CPU(cs);
11374 CPUARMState *env = &cpu->env;
11375 int i;
11376 uint32_t psr;
11378 if (is_a64(env)) {
11379 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11380 return;
11381 }
11383 for(i=0;i<16;i++) {
11384 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
11385 if ((i % 4) == 3)
11386 cpu_fprintf(f, "\n");
11387 else
11388 cpu_fprintf(f, " ");
11389 }
11390 psr = cpsr_read(env);
11391 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11392 psr,
11393 psr & (1 << 31) ? 'N' : '-',
11394 psr & (1 << 30) ? 'Z' : '-',
11395 psr & (1 << 29) ? 'C' : '-',
11396 psr & (1 << 28) ? 'V' : '-',
11397 psr & CPSR_T ? 'T' : 'A',
11398 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
11400 if (flags & CPU_DUMP_FPU) {
11401 int numvfpregs = 0;
11402 if (arm_feature(env, ARM_FEATURE_VFP)) {
11403 numvfpregs += 16;
11404 }
11405 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11406 numvfpregs += 16;
11407 }
11408 for (i = 0; i < numvfpregs; i++) {
11409 uint64_t v = float64_val(env->vfp.regs[i]);
11410 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11411 i * 2, (uint32_t)v,
11412 i * 2 + 1, (uint32_t)(v >> 32),
11413 i, v);
11414 }
11415 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
11416 }
11417 }
11419 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
11420 {
11421 if (is_a64(env)) {
11422 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
11423 env->condexec_bits = 0;
11424 } else {
11425 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
11426 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
11427 }
11428 }