4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
28 #include "tcg-op-gvec.h"
30 #include "qemu/bitops.h"
32 #include "hw/semihosting/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
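/*
 * Illustrative usage (hypothetical caller): a decode function guards an
 * architecture-dependent encoding with ARCH(), jumping to its local
 * "illegal_op" label when the feature is missing, e.g.
 *
 *     ARCH(6T2);            // UNDEF this encoding unless Thumb-2 is present
 *     ... emit TCG ops for the instruction ...
 */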
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
59 #define IS_USER(s) (s->user)
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 #include "exec/gen-icount.h"
71 static const char * const regnames[] =
72 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
73 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
75 /* Function prototypes for gen_ functions calling Neon helpers. */
76 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
78 /* Function prototypes for gen_ functions for fixed-point conversions */
79 typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
81 /* initialize TCG globals. */
82 void arm_translate_init(void)
86 for (i = 0; i < 16; i++) {
87 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
88 offsetof(CPUARMState, regs[i]),
91 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
92 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
93 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
94 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
96 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
97 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
98 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_val), "exclusive_val");
101 a64_translate_init();
104 /* Flags for the disas_set_da_iss info argument:
105 * lower bits hold the Rt register number, higher bits are flags.
107 typedef enum ISSInfo {
110 ISSInvalid = (1 << 5),
111 ISSIsAcqRel = (1 << 6),
112 ISSIsWrite = (1 << 7),
113 ISSIs16Bit = (1 << 8),
116 /* Save the syndrome information for a Data Abort */
117 static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
120 int sas = memop & MO_SIZE;
121 bool sse = memop & MO_SIGN;
122 bool is_acqrel = issinfo & ISSIsAcqRel;
123 bool is_write = issinfo & ISSIsWrite;
124 bool is_16bit = issinfo & ISSIs16Bit;
125 int srt = issinfo & ISSRegMask;
127 if (issinfo & ISSInvalid) {
128 /* Some callsites want to conditionally provide ISS info,
129 * eg "only if this was not a writeback"
135 /* For AArch32, insns where the src/dest is R15 never generate
136 * ISS information. Catching that here saves checking at all the call sites. */
142 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
143 0, 0, 0, is_write, 0, is_16bit);
144 disas_set_insn_syndrome(s, syn);
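/*
 * Illustrative example (hypothetical call site): after emitting a 16-bit
 * Thumb store of register rt, a caller would record the syndrome for a
 * possible data abort with
 *
 *     disas_set_da_iss(s, MO_UW, rt | ISSIsWrite | ISSIs16Bit);
 *
 * i.e. the Rt number occupies the low bits of the ISSInfo value and the
 * ISS* flags are ORed on top of it.
 */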
147 static inline int get_a32_user_mem_index(DisasContext *s)
149 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store" insns:
151 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
152 * otherwise, access as if at PL0.
154 switch (s->mmu_idx) {
155 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
156 case ARMMMUIdx_S12NSE0:
157 case ARMMMUIdx_S12NSE1:
158 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
160 case ARMMMUIdx_S1SE0:
161 case ARMMMUIdx_S1SE1:
162 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
163 case ARMMMUIdx_MUser:
164 case ARMMMUIdx_MPriv:
165 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
166 case ARMMMUIdx_MUserNegPri:
167 case ARMMMUIdx_MPrivNegPri:
168 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
169 case ARMMMUIdx_MSUser:
170 case ARMMMUIdx_MSPriv:
171 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
172 case ARMMMUIdx_MSUserNegPri:
173 case ARMMMUIdx_MSPrivNegPri:
174 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
177 g_assert_not_reached();
181 static inline TCGv_i32 load_cpu_offset(int offset)
183 TCGv_i32 tmp = tcg_temp_new_i32();
184 tcg_gen_ld_i32(tmp, cpu_env, offset);
188 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
190 static inline void store_cpu_offset(TCGv_i32 var, int offset)
192 tcg_gen_st_i32(var, cpu_env, offset);
193 tcg_temp_free_i32(var);
196 #define store_cpu_field(var, name) \
197 store_cpu_offset(var, offsetof(CPUARMState, name))
199 /* The architectural value of PC. */
200 static uint32_t read_pc(DisasContext *s)
202 return s->pc_curr + (s->thumb ? 4 : 8);
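/*
 * Worked example: for an instruction whose first byte is at 0x8000,
 * read_pc() returns 0x8004 in Thumb state and 0x8008 in ARM state,
 * matching the architectural rule that reading the PC yields the address
 * of the current instruction plus 4 (Thumb) or plus 8 (ARM).
 */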
205 /* Set a variable to the value of a CPU register. */
206 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
209 tcg_gen_movi_i32(var, read_pc(s));
211 tcg_gen_mov_i32(var, cpu_R[reg]);
215 /* Create a new temporary and set it to the value of a CPU register. */
216 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
218 TCGv_i32 tmp = tcg_temp_new_i32();
219 load_reg_var(s, tmp, reg);
224 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
225 * This is used for load/store for which use of PC implies (literal),
226 * or ADD that implies ADR.
228 static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
230 TCGv_i32 tmp = tcg_temp_new_i32();
233 tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
235 tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
240 /* Set a CPU register. The source must be a temporary and will be marked as dead. */
242 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
245 /* In Thumb mode, we must ignore bit 0.
246 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
247 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
248 * We choose to ignore [1:0] in ARM mode for all architecture versions.
250 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
251 s->base.is_jmp = DISAS_JUMP;
253 tcg_gen_mov_i32(cpu_R[reg], var);
254 tcg_temp_free_i32(var);
258 * Variant of store_reg which applies v8M stack-limit checks before updating
259 * SP. If the check fails this will result in an exception being taken.
260 * We disable the stack checks for CONFIG_USER_ONLY because we have
261 * no idea what the stack limits should be in that case.
262 * If stack checking is not being done this just acts like store_reg().
264 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
266 #ifndef CONFIG_USER_ONLY
267 if (s->v8m_stackcheck) {
268 gen_helper_v8m_stackcheck(cpu_env, var);
271 store_reg(s, 13, var);
274 /* Value extensions. */
275 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
276 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
277 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
278 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
280 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
281 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
284 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
286 TCGv_i32 tmp_mask = tcg_const_i32(mask);
287 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
288 tcg_temp_free_i32(tmp_mask);
290 /* Set NZCV flags from the high 4 bits of var. */
291 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
293 static void gen_exception_internal(int excp)
295 TCGv_i32 tcg_excp = tcg_const_i32(excp);
297 assert(excp_is_internal(excp));
298 gen_helper_exception_internal(cpu_env, tcg_excp);
299 tcg_temp_free_i32(tcg_excp);
302 static void gen_step_complete_exception(DisasContext *s)
304 /* We just completed step of an insn. Move from Active-not-pending
305 * to Active-pending, and then also take the swstep exception.
306 * This corresponds to making the (IMPDEF) choice to prioritize
307 * swstep exceptions over asynchronous exceptions taken to an exception
308 * level where debug is disabled. This choice has the advantage that
309 * we do not need to maintain internal state corresponding to the
310 * ISV/EX syndrome bits between completion of the step and generation
311 * of the exception, and our syndrome information is always correct.
314 gen_swstep_exception(s, 1, s->is_ldex);
315 s->base.is_jmp = DISAS_NORETURN;
318 static void gen_singlestep_exception(DisasContext *s)
320 /* Generate the right kind of exception for singlestep, which is
321 * either the architectural singlestep or EXCP_DEBUG for QEMU's
322 * gdb singlestepping.
325 gen_step_complete_exception(s);
327 gen_exception_internal(EXCP_DEBUG);
331 static inline bool is_singlestepping(DisasContext *s)
333 /* Return true if we are singlestepping either because of
334 * architectural singlestep or QEMU gdbstub singlestep. This does
335 * not include the command line '-singlestep' mode which is rather
336 * misnamed as it only means "one instruction per TB" and doesn't
337 * affect the code we generate.
339 return s->base.singlestep_enabled || s->ss_active;
342 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
344 TCGv_i32 tmp1 = tcg_temp_new_i32();
345 TCGv_i32 tmp2 = tcg_temp_new_i32();
346 tcg_gen_ext16s_i32(tmp1, a);
347 tcg_gen_ext16s_i32(tmp2, b);
348 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
349 tcg_temp_free_i32(tmp2);
350 tcg_gen_sari_i32(a, a, 16);
351 tcg_gen_sari_i32(b, b, 16);
352 tcg_gen_mul_i32(b, b, a);
353 tcg_gen_mov_i32(a, tmp1);
354 tcg_temp_free_i32(tmp1);
357 /* Byteswap each halfword. */
358 static void gen_rev16(TCGv_i32 var)
360 TCGv_i32 tmp = tcg_temp_new_i32();
361 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
362 tcg_gen_shri_i32(tmp, var, 8);
363 tcg_gen_and_i32(tmp, tmp, mask);
364 tcg_gen_and_i32(var, var, mask);
365 tcg_gen_shli_i32(var, var, 8);
366 tcg_gen_or_i32(var, var, tmp);
367 tcg_temp_free_i32(mask);
368 tcg_temp_free_i32(tmp);
371 /* Byteswap low halfword and sign extend. */
372 static void gen_revsh(TCGv_i32 var)
374 tcg_gen_ext16u_i32(var, var);
375 tcg_gen_bswap16_i32(var, var);
376 tcg_gen_ext16s_i32(var, var);
379 /* Return (b << 32) + a. Mark inputs as dead */
380 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
382 TCGv_i64 tmp64 = tcg_temp_new_i64();
384 tcg_gen_extu_i32_i64(tmp64, b);
385 tcg_temp_free_i32(b);
386 tcg_gen_shli_i64(tmp64, tmp64, 32);
387 tcg_gen_add_i64(a, tmp64, a);
389 tcg_temp_free_i64(tmp64);
393 /* Return (b << 32) - a. Mark inputs as dead. */
394 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
396 TCGv_i64 tmp64 = tcg_temp_new_i64();
398 tcg_gen_extu_i32_i64(tmp64, b);
399 tcg_temp_free_i32(b);
400 tcg_gen_shli_i64(tmp64, tmp64, 32);
401 tcg_gen_sub_i64(a, tmp64, a);
403 tcg_temp_free_i64(tmp64);
407 /* 32x32->64 multiply. Marks inputs as dead. */
408 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
410 TCGv_i32 lo = tcg_temp_new_i32();
411 TCGv_i32 hi = tcg_temp_new_i32();
414 tcg_gen_mulu2_i32(lo, hi, a, b);
415 tcg_temp_free_i32(a);
416 tcg_temp_free_i32(b);
418 ret = tcg_temp_new_i64();
419 tcg_gen_concat_i32_i64(ret, lo, hi);
420 tcg_temp_free_i32(lo);
421 tcg_temp_free_i32(hi);
426 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
428 TCGv_i32 lo = tcg_temp_new_i32();
429 TCGv_i32 hi = tcg_temp_new_i32();
432 tcg_gen_muls2_i32(lo, hi, a, b);
433 tcg_temp_free_i32(a);
434 tcg_temp_free_i32(b);
436 ret = tcg_temp_new_i64();
437 tcg_gen_concat_i32_i64(ret, lo, hi);
438 tcg_temp_free_i32(lo);
439 tcg_temp_free_i32(hi);
444 /* Swap low and high halfwords. */
445 static void gen_swap_half(TCGv_i32 var)
447 TCGv_i32 tmp = tcg_temp_new_i32();
448 tcg_gen_shri_i32(tmp, var, 16);
449 tcg_gen_shli_i32(var, var, 16);
450 tcg_gen_or_i32(var, var, tmp);
451 tcg_temp_free_i32(tmp);
454 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
455 tmp = (t0 ^ t1) & 0x8000;
456 t0 &= ~0x8000;
457 t1 &= ~0x8000;
458 t0 = (t0 + t1) ^ tmp;
461 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
463 TCGv_i32 tmp = tcg_temp_new_i32();
464 tcg_gen_xor_i32(tmp, t0, t1);
465 tcg_gen_andi_i32(tmp, tmp, 0x8000);
466 tcg_gen_andi_i32(t0, t0, ~0x8000);
467 tcg_gen_andi_i32(t1, t1, ~0x8000);
468 tcg_gen_add_i32(t0, t0, t1);
469 tcg_gen_xor_i32(t0, t0, tmp);
470 tcg_temp_free_i32(tmp);
471 tcg_temp_free_i32(t1);
474 /* Set CF to the top bit of var. */
475 static void gen_set_CF_bit31(TCGv_i32 var)
477 tcg_gen_shri_i32(cpu_CF, var, 31);
480 /* Set N and Z flags from var. */
481 static inline void gen_logic_CC(TCGv_i32 var)
483 tcg_gen_mov_i32(cpu_NF, var);
484 tcg_gen_mov_i32(cpu_ZF, var);
488 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
490 tcg_gen_add_i32(t0, t0, t1);
491 tcg_gen_add_i32(t0, t0, cpu_CF);
494 /* dest = T0 + T1 + CF. */
495 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
497 tcg_gen_add_i32(dest, t0, t1);
498 tcg_gen_add_i32(dest, dest, cpu_CF);
501 /* dest = T0 - T1 + CF - 1. */
502 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
504 tcg_gen_sub_i32(dest, t0, t1);
505 tcg_gen_add_i32(dest, dest, cpu_CF);
506 tcg_gen_subi_i32(dest, dest, 1);
509 /* dest = T0 + T1. Compute C, N, V and Z flags */
510 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
512 TCGv_i32 tmp = tcg_temp_new_i32();
513 tcg_gen_movi_i32(tmp, 0);
514 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
515 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
516 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
517 tcg_gen_xor_i32(tmp, t0, t1);
518 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
519 tcg_temp_free_i32(tmp);
520 tcg_gen_mov_i32(dest, cpu_NF);
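/*
 * Explanatory note on the VF computation above: signed overflow occurs
 * when both addends have the same sign but the result's sign differs, so
 *
 *     VF = (result ^ t0) & ~(t0 ^ t1)
 *
 * Only the sign bit of cpu_VF is significant; arm_test_cc() below looks
 * only at bit 31.
 */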
523 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
524 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
526 TCGv_i32 tmp = tcg_temp_new_i32();
527 if (TCG_TARGET_HAS_add2_i32) {
528 tcg_gen_movi_i32(tmp, 0);
529 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
530 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
532 TCGv_i64 q0 = tcg_temp_new_i64();
533 TCGv_i64 q1 = tcg_temp_new_i64();
534 tcg_gen_extu_i32_i64(q0, t0);
535 tcg_gen_extu_i32_i64(q1, t1);
536 tcg_gen_add_i64(q0, q0, q1);
537 tcg_gen_extu_i32_i64(q1, cpu_CF);
538 tcg_gen_add_i64(q0, q0, q1);
539 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
540 tcg_temp_free_i64(q0);
541 tcg_temp_free_i64(q1);
543 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
544 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
545 tcg_gen_xor_i32(tmp, t0, t1);
546 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
547 tcg_temp_free_i32(tmp);
548 tcg_gen_mov_i32(dest, cpu_NF);
551 /* dest = T0 - T1. Compute C, N, V and Z flags */
552 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
555 tcg_gen_sub_i32(cpu_NF, t0, t1);
556 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
557 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
558 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
559 tmp = tcg_temp_new_i32();
560 tcg_gen_xor_i32(tmp, t0, t1);
561 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
562 tcg_temp_free_i32(tmp);
563 tcg_gen_mov_i32(dest, cpu_NF);
566 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
567 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
569 TCGv_i32 tmp = tcg_temp_new_i32();
570 tcg_gen_not_i32(tmp, t1);
571 gen_adc_CC(dest, t0, tmp);
572 tcg_temp_free_i32(tmp);
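/*
 * Explanatory note: in two's complement ~t1 == -t1 - 1, so the
 * "t0 + ~t1 + CF" computed here equals the architectural SBC result
 * t0 - t1 + CF - 1, which is why gen_sbc_CC() can reuse gen_adc_CC()
 * on the complemented operand.
 */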
575 #define GEN_SHIFT(name) \
576 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
578 TCGv_i32 tmp1, tmp2, tmp3; \
579 tmp1 = tcg_temp_new_i32(); \
580 tcg_gen_andi_i32(tmp1, t1, 0xff); \
581 tmp2 = tcg_const_i32(0); \
582 tmp3 = tcg_const_i32(0x1f); \
583 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
584 tcg_temp_free_i32(tmp3); \
585 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
586 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
587 tcg_temp_free_i32(tmp2); \
588 tcg_temp_free_i32(tmp1); \
594 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
597 tmp1 = tcg_temp_new_i32();
598 tcg_gen_andi_i32(tmp1, t1, 0xff);
599 tmp2 = tcg_const_i32(0x1f);
600 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
601 tcg_temp_free_i32(tmp2);
602 tcg_gen_sar_i32(dest, t0, tmp1);
603 tcg_temp_free_i32(tmp1);
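/*
 * Illustrative summary of the register-specified shifts above: only the
 * bottom byte of the shift register is used. The GEN_SHIFT-generated
 * helpers force the result to zero once that byte exceeds 31, e.g.
 *
 *     gen_shl(dest, t0, t1);   // dest = (t1 & 0xff) > 31 ? 0 : t0 << (t1 & 0x1f)
 *
 * while gen_sar() clamps the amount to 31, so large shifts return the
 * sign bits of t0.
 */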
606 static void shifter_out_im(TCGv_i32 var, int shift)
608 tcg_gen_extract_i32(cpu_CF, var, shift, 1);
611 /* Shift by immediate. Includes special handling for shift == 0. */
612 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
613 int shift, int flags)
619 shifter_out_im(var, 32 - shift);
620 tcg_gen_shli_i32(var, var, shift);
626 tcg_gen_shri_i32(cpu_CF, var, 31);
628 tcg_gen_movi_i32(var, 0);
631 shifter_out_im(var, shift - 1);
632 tcg_gen_shri_i32(var, var, shift);
639 shifter_out_im(var, shift - 1);
642 tcg_gen_sari_i32(var, var, shift);
644 case 3: /* ROR/RRX */
647 shifter_out_im(var, shift - 1);
648 tcg_gen_rotri_i32(var, var, shift); break;
650 TCGv_i32 tmp = tcg_temp_new_i32();
651 tcg_gen_shli_i32(tmp, cpu_CF, 31);
653 shifter_out_im(var, 0);
654 tcg_gen_shri_i32(var, var, 1);
655 tcg_gen_or_i32(var, var, tmp);
656 tcg_temp_free_i32(tmp);
661 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
662 TCGv_i32 shift, int flags)
666 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
667 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
668 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
669 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
674 gen_shl(var, var, shift);
677 gen_shr(var, var, shift);
680 gen_sar(var, var, shift);
682 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
683 tcg_gen_rotr_i32(var, var, shift); break;
686 tcg_temp_free_i32(shift);
689 #define PAS_OP(pfx) \
691 case 0: gen_pas_helper(glue(pfx,add16)); break; \
692 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
693 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
694 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
695 case 4: gen_pas_helper(glue(pfx,add8)); break; \
696 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
698 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
703 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
705 tmp = tcg_temp_new_ptr();
706 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
708 tcg_temp_free_ptr(tmp);
711 tmp = tcg_temp_new_ptr();
712 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
714 tcg_temp_free_ptr(tmp);
716 #undef gen_pas_helper
717 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
730 #undef gen_pas_helper
735 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
736 #define PAS_OP(pfx) \
738 case 0: gen_pas_helper(glue(pfx,add8)); break; \
739 case 1: gen_pas_helper(glue(pfx,add16)); break; \
740 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
741 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
742 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
743 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
745 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
750 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
752 tmp = tcg_temp_new_ptr();
753 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
755 tcg_temp_free_ptr(tmp);
758 tmp = tcg_temp_new_ptr();
759 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
761 tcg_temp_free_ptr(tmp);
763 #undef gen_pas_helper
764 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
777 #undef gen_pas_helper
783 * Generate a conditional based on ARM condition code cc.
784 * This is common between ARM and AArch64 targets.
786 void arm_test_cc(DisasCompare *cmp, int cc)
817 case 8: /* hi: C && !Z */
818 case 9: /* ls: !C || Z -> !(C && !Z) */
820 value = tcg_temp_new_i32();
822 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
823 ZF is non-zero for !Z; so AND the two subexpressions. */
824 tcg_gen_neg_i32(value, cpu_CF);
825 tcg_gen_and_i32(value, value, cpu_ZF);
828 case 10: /* ge: N == V -> N ^ V == 0 */
829 case 11: /* lt: N != V -> N ^ V != 0 */
830 /* Since we're only interested in the sign bit, == 0 is >= 0. */
832 value = tcg_temp_new_i32();
834 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
837 case 12: /* gt: !Z && N == V */
838 case 13: /* le: Z || N != V */
840 value = tcg_temp_new_i32();
842 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
843 * the sign bit then AND with ZF to yield the result. */
844 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
845 tcg_gen_sari_i32(value, value, 31);
846 tcg_gen_andc_i32(value, cpu_ZF, value);
849 case 14: /* always */
850 case 15: /* always */
851 /* Use the ALWAYS condition, which will fold early.
852 * It doesn't matter what we use for the value. */
853 cond = TCG_COND_ALWAYS;
858 fprintf(stderr, "Bad condition code 0x%x\n", cc);
863 cond = tcg_invert_cond(cond);
869 cmp->value_global = global;
872 void arm_free_cc(DisasCompare *cmp)
874 if (!cmp->value_global) {
875 tcg_temp_free_i32(cmp->value);
879 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
881 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
884 void arm_gen_test_cc(int cc, TCGLabel *label)
887 arm_test_cc(&cmp, cc);
888 arm_jump_cc(&cmp, label);
892 static const uint8_t table_logic_cc[16] = {
911 static inline void gen_set_condexec(DisasContext *s)
913 if (s->condexec_mask) {
914 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
915 TCGv_i32 tmp = tcg_temp_new_i32();
916 tcg_gen_movi_i32(tmp, val);
917 store_cpu_field(tmp, condexec_bits);
921 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
923 tcg_gen_movi_i32(cpu_R[15], val);
926 /* Set PC and Thumb state from an immediate address. */
927 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
931 s->base.is_jmp = DISAS_JUMP;
932 if (s->thumb != (addr & 1)) {
933 tmp = tcg_temp_new_i32();
934 tcg_gen_movi_i32(tmp, addr & 1);
935 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
936 tcg_temp_free_i32(tmp);
938 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
941 /* Set PC and Thumb state from var. var is marked as dead. */
942 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
944 s->base.is_jmp = DISAS_JUMP;
945 tcg_gen_andi_i32(cpu_R[15], var, ~1);
946 tcg_gen_andi_i32(var, var, 1);
947 store_cpu_field(var, thumb);
950 /* Set PC and Thumb state from var. var is marked as dead.
951 * For M-profile CPUs, include logic to detect exception-return
952 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
953 * and BX reg, and no others, and happens only for code in Handler mode.
955 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
957 /* Generate the same code here as for a simple bx, but flag via
958 * s->base.is_jmp that we need to do the rest of the work later.
961 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
962 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
963 s->base.is_jmp = DISAS_BX_EXCRET;
967 static inline void gen_bx_excret_final_code(DisasContext *s)
969 /* Generate the code to finish possible exception return and end the TB */
970 TCGLabel *excret_label = gen_new_label();
973 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
974 /* Covers FNC_RETURN and EXC_RETURN magic */
975 min_magic = FNC_RETURN_MIN_MAGIC;
977 /* EXC_RETURN magic only */
978 min_magic = EXC_RETURN_MIN_MAGIC;
981 /* Is the new PC value in the magic range indicating exception return? */
982 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
983 /* No: end the TB as we would for a DISAS_JMP */
984 if (is_singlestepping(s)) {
985 gen_singlestep_exception(s);
987 tcg_gen_exit_tb(NULL, 0);
989 gen_set_label(excret_label);
990 /* Yes: this is an exception return.
991 * At this point in runtime env->regs[15] and env->thumb will hold
992 * the exception-return magic number, which do_v7m_exception_exit()
993 * will read. Nothing else will be able to see those values because
994 * the cpu-exec main loop guarantees that we will always go straight
995 * from raising the exception to the exception-handling code.
997 * gen_ss_advance(s) does nothing on M profile currently but
998 * calling it is conceptually the right thing as we have executed
999 * this instruction (compare SWI, HVC, SMC handling).
1002 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1005 static inline void gen_bxns(DisasContext *s, int rm)
1007 TCGv_i32 var = load_reg(s, rm);
1009 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1010 * we need to sync state before calling it, but:
1011 * - we don't need to do gen_set_pc_im() because the bxns helper will
1012 * always set the PC itself
1013 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1014 * unless it's outside an IT block or the last insn in an IT block,
1015 * so we know that condexec == 0 (already set at the top of the TB)
1016 * is correct in the non-UNPREDICTABLE cases, and we can choose
1017 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1019 gen_helper_v7m_bxns(cpu_env, var);
1020 tcg_temp_free_i32(var);
1021 s->base.is_jmp = DISAS_EXIT;
1024 static inline void gen_blxns(DisasContext *s, int rm)
1026 TCGv_i32 var = load_reg(s, rm);
1028 /* We don't need to sync condexec state, for the same reason as bxns.
1029 * We do however need to set the PC, because the blxns helper reads it.
1030 * The blxns helper may throw an exception.
1032 gen_set_pc_im(s, s->base.pc_next);
1033 gen_helper_v7m_blxns(cpu_env, var);
1034 tcg_temp_free_i32(var);
1035 s->base.is_jmp = DISAS_EXIT;
1038 /* Variant of store_reg which uses branch&exchange logic when storing
1039 to r15 in ARM architecture v7 and above. The source must be a temporary
1040 and will be marked as dead. */
1041 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1043 if (reg == 15 && ENABLE_ARCH_7) {
1046 store_reg(s, reg, var);
1050 /* Variant of store_reg which uses branch&exchange logic when storing
1051 * to r15 in ARM architecture v5T and above. This is used for storing
1052 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1053 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1054 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1056 if (reg == 15 && ENABLE_ARCH_5) {
1057 gen_bx_excret(s, var);
1059 store_reg(s, reg, var);
1063 #ifdef CONFIG_USER_ONLY
1064 #define IS_USER_ONLY 1
1066 #define IS_USER_ONLY 0
1069 /* Abstractions of "generate code to do a guest load/store for
1070 * AArch32", where a vaddr is always 32 bits (and is zero
1071 * extended if we're a 64 bit core) and data is also
1072 * 32 bits unless specifically doing a 64 bit access.
1073 * These functions work like tcg_gen_qemu_{ld,st}* except
1074 * that the address argument is TCGv_i32 rather than TCGv.
1077 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1079 TCGv addr = tcg_temp_new();
1080 tcg_gen_extu_i32_tl(addr, a32);
1082 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1083 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1084 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
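/*
 * Worked example: with SCTLR.B set, a 16-bit access XORs the address
 * with 2 and a byte access XORs it with 3, so a halfword load from
 * 0x1002 actually touches bytes 0x1000-0x1001 -- the usual BE32
 * word-invariant address munging. Accesses of 32 bits or more are left
 * unchanged by the size check above.
 */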
1089 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1090 int index, TCGMemOp opc)
1094 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1095 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1099 addr = gen_aa32_addr(s, a32, opc);
1100 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1101 tcg_temp_free(addr);
1104 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1105 int index, TCGMemOp opc)
1109 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1110 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1114 addr = gen_aa32_addr(s, a32, opc);
1115 tcg_gen_qemu_st_i32(val, addr, index, opc);
1116 tcg_temp_free(addr);
1119 #define DO_GEN_LD(SUFF, OPC) \
1120 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1121 TCGv_i32 a32, int index) \
1123 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1125 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1127 TCGv_i32 a32, int index, \
1130 gen_aa32_ld##SUFF(s, val, a32, index); \
1131 disas_set_da_iss(s, OPC, issinfo); \
1134 #define DO_GEN_ST(SUFF, OPC) \
1135 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1136 TCGv_i32 a32, int index) \
1138 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1140 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1142 TCGv_i32 a32, int index, \
1145 gen_aa32_st##SUFF(s, val, a32, index); \
1146 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1149 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1151 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1152 if (!IS_USER_ONLY && s->sctlr_b) {
1153 tcg_gen_rotri_i64(val, val, 32);
1157 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1158 int index, TCGMemOp opc)
1160 TCGv addr = gen_aa32_addr(s, a32, opc);
1161 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1162 gen_aa32_frob64(s, val);
1163 tcg_temp_free(addr);
1166 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1167 TCGv_i32 a32, int index)
1169 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1172 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1173 int index, TCGMemOp opc)
1175 TCGv addr = gen_aa32_addr(s, a32, opc);
1177 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1178 if (!IS_USER_ONLY && s->sctlr_b) {
1179 TCGv_i64 tmp = tcg_temp_new_i64();
1180 tcg_gen_rotri_i64(tmp, val, 32);
1181 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1182 tcg_temp_free_i64(tmp);
1184 tcg_gen_qemu_st_i64(val, addr, index, opc);
1186 tcg_temp_free(addr);
1189 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1190 TCGv_i32 a32, int index)
1192 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1195 DO_GEN_LD(8s, MO_SB)
1196 DO_GEN_LD(8u, MO_UB)
1197 DO_GEN_LD(16s, MO_SW)
1198 DO_GEN_LD(16u, MO_UW)
1199 DO_GEN_LD(32u, MO_UL)
1201 DO_GEN_ST(16, MO_UW)
1202 DO_GEN_ST(32, MO_UL)
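/*
 * Expansion example: DO_GEN_LD(16u, MO_UW) above defines both
 *
 *     gen_aa32_ld16u(s, val, a32, index);
 *     gen_aa32_ld16u_iss(s, val, a32, index, issinfo);
 *
 * the latter additionally recording ISS syndrome information via
 * disas_set_da_iss(). The DO_GEN_ST variants do the same for stores and
 * OR ISSIsWrite into the syndrome flags.
 */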
1204 static inline void gen_hvc(DisasContext *s, int imm16)
1206 /* The pre HVC helper handles cases when HVC gets trapped
1207 * as an undefined insn by runtime configuration (i.e. before
1208 * the insn really executes).
1210 gen_set_pc_im(s, s->pc_curr);
1211 gen_helper_pre_hvc(cpu_env);
1212 /* Otherwise we will treat this as a real exception which
1213 * happens after execution of the insn. (The distinction matters
1214 * for the PC value reported to the exception handler and also
1215 * for single stepping.)
1218 gen_set_pc_im(s, s->base.pc_next);
1219 s->base.is_jmp = DISAS_HVC;
1222 static inline void gen_smc(DisasContext *s)
1224 /* As with HVC, we may take an exception either before or after
1225 * the insn executes.
1229 gen_set_pc_im(s, s->pc_curr);
1230 tmp = tcg_const_i32(syn_aa32_smc());
1231 gen_helper_pre_smc(cpu_env, tmp);
1232 tcg_temp_free_i32(tmp);
1233 gen_set_pc_im(s, s->base.pc_next);
1234 s->base.is_jmp = DISAS_SMC;
1237 static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
1239 gen_set_condexec(s);
1240 gen_set_pc_im(s, pc);
1241 gen_exception_internal(excp);
1242 s->base.is_jmp = DISAS_NORETURN;
1245 static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
1246 int syn, uint32_t target_el)
1248 gen_set_condexec(s);
1249 gen_set_pc_im(s, pc);
1250 gen_exception(excp, syn, target_el);
1251 s->base.is_jmp = DISAS_NORETURN;
1254 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
1258 gen_set_condexec(s);
1259 gen_set_pc_im(s, s->pc_curr);
1260 tcg_syn = tcg_const_i32(syn);
1261 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1262 tcg_temp_free_i32(tcg_syn);
1263 s->base.is_jmp = DISAS_NORETURN;
1266 void unallocated_encoding(DisasContext *s)
1268 /* Unallocated and reserved encodings are uncategorized */
1269 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
1270 default_exception_el(s));
1273 /* Force a TB lookup after an instruction that changes the CPU state. */
1274 static inline void gen_lookup_tb(DisasContext *s)
1276 tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
1277 s->base.is_jmp = DISAS_EXIT;
1280 static inline void gen_hlt(DisasContext *s, int imm)
1282 /* HLT. This has two purposes.
1283 * Architecturally, it is an external halting debug instruction.
1284 * Since QEMU doesn't implement external debug, we treat it as
1285 * the architecture requires when halting debug is disabled: it will UNDEF.
1286 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1287 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1288 * must trigger semihosting even for ARMv7 and earlier, where
1289 * HLT was an undefined encoding.
1290 * In system mode, we don't allow userspace access to
1291 * semihosting, to provide some semblance of security
1292 * (and for consistency with our 32-bit semihosting).
1294 if (semihosting_enabled() &&
1295 #ifndef CONFIG_USER_ONLY
1296 s->current_el != 0 &&
1298 (imm == (s->thumb ? 0x3c : 0xf000))) {
1299 gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
1303 unallocated_encoding(s);
1306 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1309 int val, rm, shift, shiftop;
1312 if (!(insn & (1 << 25))) {
1315 if (!(insn & (1 << 23)))
1318 tcg_gen_addi_i32(var, var, val);
1320 /* shift/register */
1322 shift = (insn >> 7) & 0x1f;
1323 shiftop = (insn >> 5) & 3;
1324 offset = load_reg(s, rm);
1325 gen_arm_shift_im(offset, shiftop, shift, 0);
1326 if (!(insn & (1 << 23)))
1327 tcg_gen_sub_i32(var, var, offset);
1329 tcg_gen_add_i32(var, var, offset);
1330 tcg_temp_free_i32(offset);
1334 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1335 int extra, TCGv_i32 var)
1340 if (insn & (1 << 22)) {
1342 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1343 if (!(insn & (1 << 23)))
1347 tcg_gen_addi_i32(var, var, val);
1351 tcg_gen_addi_i32(var, var, extra);
1353 offset = load_reg(s, rm);
1354 if (!(insn & (1 << 23)))
1355 tcg_gen_sub_i32(var, var, offset);
1357 tcg_gen_add_i32(var, var, offset);
1358 tcg_temp_free_i32(offset);
1362 static TCGv_ptr get_fpstatus_ptr(int neon)
1364 TCGv_ptr statusptr = tcg_temp_new_ptr();
1367 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1369 offset = offsetof(CPUARMState, vfp.fp_status);
1371 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1375 static inline long vfp_reg_offset(bool dp, unsigned reg)
1378 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1380 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1382 ofs += offsetof(CPU_DoubleU, l.upper);
1384 ofs += offsetof(CPU_DoubleU, l.lower);
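/*
 * Worked example for the single-precision path above: S7 resolves to
 * zregs[1].d[1] (the storage of D3) plus the offset of its upper 32-bit
 * word, consistent with the architectural S{2n,2n+1} <-> Dn aliasing
 * (odd-numbered S registers live in the upper half).
 */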
1390 /* Return the offset of a 32-bit piece of a NEON register.
1391 zero is the least significant end of the register. */
1393 neon_reg_offset (int reg, int n)
1397 return vfp_reg_offset(0, sreg);
1400 /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1401 * where 0 is the least significant end of the register.
1404 neon_element_offset(int reg, int element, TCGMemOp size)
1406 int element_size = 1 << size;
1407 int ofs = element * element_size;
1408 #ifdef HOST_WORDS_BIGENDIAN
1409 /* Calculate the offset assuming fully little-endian,
1410 * then XOR to account for the order of the 8-byte units.
1412 if (element_size < 8) {
1413 ofs ^= 8 - element_size;
1416 return neon_reg_offset(reg, 0) + ofs;
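/*
 * Worked example for the big-endian adjustment above: element 1 of a
 * 16-bit vector has little-endian offset 2; XORing with (8 - 2) turns
 * that into byte offset 4 within the 8-byte unit, which is where that
 * halfword sits when the unit is stored big-endian. 64-bit elements need
 * no adjustment.
 */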
1419 static TCGv_i32 neon_load_reg(int reg, int pass)
1421 TCGv_i32 tmp = tcg_temp_new_i32();
1422 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1426 static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
1428 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1432 tcg_gen_ld8u_i32(var, cpu_env, offset);
1435 tcg_gen_ld16u_i32(var, cpu_env, offset);
1438 tcg_gen_ld_i32(var, cpu_env, offset);
1441 g_assert_not_reached();
1445 static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
1447 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1451 tcg_gen_ld8u_i64(var, cpu_env, offset);
1454 tcg_gen_ld16u_i64(var, cpu_env, offset);
1457 tcg_gen_ld32u_i64(var, cpu_env, offset);
1460 tcg_gen_ld_i64(var, cpu_env, offset);
1463 g_assert_not_reached();
1467 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1469 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1470 tcg_temp_free_i32(var);
1473 static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
1475 long offset = neon_element_offset(reg, ele, size);
1479 tcg_gen_st8_i32(var, cpu_env, offset);
1482 tcg_gen_st16_i32(var, cpu_env, offset);
1485 tcg_gen_st_i32(var, cpu_env, offset);
1488 g_assert_not_reached();
1492 static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
1494 long offset = neon_element_offset(reg, ele, size);
1498 tcg_gen_st8_i64(var, cpu_env, offset);
1501 tcg_gen_st16_i64(var, cpu_env, offset);
1504 tcg_gen_st32_i64(var, cpu_env, offset);
1507 tcg_gen_st_i64(var, cpu_env, offset);
1510 g_assert_not_reached();
1514 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1516 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1519 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1521 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1524 static inline void neon_load_reg32(TCGv_i32 var, int reg)
1526 tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
1529 static inline void neon_store_reg32(TCGv_i32 var, int reg)
1531 tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
1534 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1536 TCGv_ptr ret = tcg_temp_new_ptr();
1537 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1541 #define ARM_CP_RW_BIT (1 << 20)
1543 /* Include the VFP decoder */
1544 #include "translate-vfp.inc.c"
1546 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1548 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1551 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1553 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1556 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1558 TCGv_i32 var = tcg_temp_new_i32();
1559 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1563 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1565 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1566 tcg_temp_free_i32(var);
1569 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1571 iwmmxt_store_reg(cpu_M0, rn);
1574 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1576 iwmmxt_load_reg(cpu_M0, rn);
1579 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1581 iwmmxt_load_reg(cpu_V1, rn);
1582 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1585 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1587 iwmmxt_load_reg(cpu_V1, rn);
1588 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1591 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1593 iwmmxt_load_reg(cpu_V1, rn);
1594 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1597 #define IWMMXT_OP(name) \
1598 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1600 iwmmxt_load_reg(cpu_V1, rn); \
1601 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1604 #define IWMMXT_OP_ENV(name) \
1605 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1607 iwmmxt_load_reg(cpu_V1, rn); \
1608 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1611 #define IWMMXT_OP_ENV_SIZE(name) \
1612 IWMMXT_OP_ENV(name##b) \
1613 IWMMXT_OP_ENV(name##w) \
1614 IWMMXT_OP_ENV(name##l)
1616 #define IWMMXT_OP_ENV1(name) \
1617 static inline void gen_op_iwmmxt_##name##_M0(void) \
1619 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1633 IWMMXT_OP_ENV_SIZE(unpackl)
1634 IWMMXT_OP_ENV_SIZE(unpackh)
1636 IWMMXT_OP_ENV1(unpacklub)
1637 IWMMXT_OP_ENV1(unpackluw)
1638 IWMMXT_OP_ENV1(unpacklul)
1639 IWMMXT_OP_ENV1(unpackhub)
1640 IWMMXT_OP_ENV1(unpackhuw)
1641 IWMMXT_OP_ENV1(unpackhul)
1642 IWMMXT_OP_ENV1(unpacklsb)
1643 IWMMXT_OP_ENV1(unpacklsw)
1644 IWMMXT_OP_ENV1(unpacklsl)
1645 IWMMXT_OP_ENV1(unpackhsb)
1646 IWMMXT_OP_ENV1(unpackhsw)
1647 IWMMXT_OP_ENV1(unpackhsl)
1649 IWMMXT_OP_ENV_SIZE(cmpeq)
1650 IWMMXT_OP_ENV_SIZE(cmpgtu)
1651 IWMMXT_OP_ENV_SIZE(cmpgts)
1653 IWMMXT_OP_ENV_SIZE(mins)
1654 IWMMXT_OP_ENV_SIZE(minu)
1655 IWMMXT_OP_ENV_SIZE(maxs)
1656 IWMMXT_OP_ENV_SIZE(maxu)
1658 IWMMXT_OP_ENV_SIZE(subn)
1659 IWMMXT_OP_ENV_SIZE(addn)
1660 IWMMXT_OP_ENV_SIZE(subu)
1661 IWMMXT_OP_ENV_SIZE(addu)
1662 IWMMXT_OP_ENV_SIZE(subs)
1663 IWMMXT_OP_ENV_SIZE(adds)
1665 IWMMXT_OP_ENV(avgb0)
1666 IWMMXT_OP_ENV(avgb1)
1667 IWMMXT_OP_ENV(avgw0)
1668 IWMMXT_OP_ENV(avgw1)
1670 IWMMXT_OP_ENV(packuw)
1671 IWMMXT_OP_ENV(packul)
1672 IWMMXT_OP_ENV(packuq)
1673 IWMMXT_OP_ENV(packsw)
1674 IWMMXT_OP_ENV(packsl)
1675 IWMMXT_OP_ENV(packsq)
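/*
 * Expansion example: each IWMMXT_OP_ENV_SIZE(name) above produces the
 * byte/word/long variants; IWMMXT_OP_ENV_SIZE(cmpeq), for instance,
 * defines
 *
 *     gen_op_iwmmxt_cmpeqb_M0_wRn(rn);
 *     gen_op_iwmmxt_cmpeqw_M0_wRn(rn);
 *     gen_op_iwmmxt_cmpeql_M0_wRn(rn);
 *
 * each of which loads wRn into cpu_V1 and calls the matching
 * gen_helper_iwmmxt_* with cpu_env, leaving the result in cpu_M0.
 */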
1677 static void gen_op_iwmmxt_set_mup(void)
1680 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1681 tcg_gen_ori_i32(tmp, tmp, 2);
1682 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1685 static void gen_op_iwmmxt_set_cup(void)
1688 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1689 tcg_gen_ori_i32(tmp, tmp, 1);
1690 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1693 static void gen_op_iwmmxt_setpsr_nz(void)
1695 TCGv_i32 tmp = tcg_temp_new_i32();
1696 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1697 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1700 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1702 iwmmxt_load_reg(cpu_V1, rn);
1703 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1704 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1707 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1714 rd = (insn >> 16) & 0xf;
1715 tmp = load_reg(s, rd);
1717 offset = (insn & 0xff) << ((insn >> 7) & 2);
1718 if (insn & (1 << 24)) {
1720 if (insn & (1 << 23))
1721 tcg_gen_addi_i32(tmp, tmp, offset);
1723 tcg_gen_addi_i32(tmp, tmp, -offset);
1724 tcg_gen_mov_i32(dest, tmp);
1725 if (insn & (1 << 21))
1726 store_reg(s, rd, tmp);
1728 tcg_temp_free_i32(tmp);
1729 } else if (insn & (1 << 21)) {
1731 tcg_gen_mov_i32(dest, tmp);
1732 if (insn & (1 << 23))
1733 tcg_gen_addi_i32(tmp, tmp, offset);
1735 tcg_gen_addi_i32(tmp, tmp, -offset);
1736 store_reg(s, rd, tmp);
1737 } else if (!(insn & (1 << 23)))
1742 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1744 int rd = (insn >> 0) & 0xf;
1747 if (insn & (1 << 8)) {
1748 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1751 tmp = iwmmxt_load_creg(rd);
1754 tmp = tcg_temp_new_i32();
1755 iwmmxt_load_reg(cpu_V0, rd);
1756 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1758 tcg_gen_andi_i32(tmp, tmp, mask);
1759 tcg_gen_mov_i32(dest, tmp);
1760 tcg_temp_free_i32(tmp);
1764 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1765 (i.e. an undefined instruction). */
1766 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1769 int rdhi, rdlo, rd0, rd1, i;
1771 TCGv_i32 tmp, tmp2, tmp3;
1773 if ((insn & 0x0e000e00) == 0x0c000000) {
1774 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1776 rdlo = (insn >> 12) & 0xf;
1777 rdhi = (insn >> 16) & 0xf;
1778 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1779 iwmmxt_load_reg(cpu_V0, wrd);
1780 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1781 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1782 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1783 } else { /* TMCRR */
1784 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1785 iwmmxt_store_reg(cpu_V0, wrd);
1786 gen_op_iwmmxt_set_mup();
1791 wrd = (insn >> 12) & 0xf;
1792 addr = tcg_temp_new_i32();
1793 if (gen_iwmmxt_address(s, insn, addr)) {
1794 tcg_temp_free_i32(addr);
1797 if (insn & ARM_CP_RW_BIT) {
1798 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1799 tmp = tcg_temp_new_i32();
1800 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1801 iwmmxt_store_creg(wrd, tmp);
1804 if (insn & (1 << 8)) {
1805 if (insn & (1 << 22)) { /* WLDRD */
1806 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1808 } else { /* WLDRW wRd */
1809 tmp = tcg_temp_new_i32();
1810 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1813 tmp = tcg_temp_new_i32();
1814 if (insn & (1 << 22)) { /* WLDRH */
1815 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1816 } else { /* WLDRB */
1817 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1821 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1822 tcg_temp_free_i32(tmp);
1824 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1828 tmp = iwmmxt_load_creg(wrd);
1829 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1831 gen_op_iwmmxt_movq_M0_wRn(wrd);
1832 tmp = tcg_temp_new_i32();
1833 if (insn & (1 << 8)) {
1834 if (insn & (1 << 22)) { /* WSTRD */
1835 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1836 } else { /* WSTRW wRd */
1837 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1838 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1841 if (insn & (1 << 22)) { /* WSTRH */
1842 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1843 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1844 } else { /* WSTRB */
1845 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1846 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1850 tcg_temp_free_i32(tmp);
1852 tcg_temp_free_i32(addr);
1856 if ((insn & 0x0f000000) != 0x0e000000)
1859 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
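/*
 * Explanatory note: the switch key packs insn[23:20] into bits [11:8]
 * and insn[11:4] into bits [7:0]. For example the TMCR case 0x011
 * below matches insn[23:20] == 0x0 together with insn[11:4] == 0x11.
 */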
1860 case 0x000: /* WOR */
1861 wrd = (insn >> 12) & 0xf;
1862 rd0 = (insn >> 0) & 0xf;
1863 rd1 = (insn >> 16) & 0xf;
1864 gen_op_iwmmxt_movq_M0_wRn(rd0);
1865 gen_op_iwmmxt_orq_M0_wRn(rd1);
1866 gen_op_iwmmxt_setpsr_nz();
1867 gen_op_iwmmxt_movq_wRn_M0(wrd);
1868 gen_op_iwmmxt_set_mup();
1869 gen_op_iwmmxt_set_cup();
1871 case 0x011: /* TMCR */
1874 rd = (insn >> 12) & 0xf;
1875 wrd = (insn >> 16) & 0xf;
1877 case ARM_IWMMXT_wCID:
1878 case ARM_IWMMXT_wCASF:
1880 case ARM_IWMMXT_wCon:
1881 gen_op_iwmmxt_set_cup();
1883 case ARM_IWMMXT_wCSSF:
1884 tmp = iwmmxt_load_creg(wrd);
1885 tmp2 = load_reg(s, rd);
1886 tcg_gen_andc_i32(tmp, tmp, tmp2);
1887 tcg_temp_free_i32(tmp2);
1888 iwmmxt_store_creg(wrd, tmp);
1890 case ARM_IWMMXT_wCGR0:
1891 case ARM_IWMMXT_wCGR1:
1892 case ARM_IWMMXT_wCGR2:
1893 case ARM_IWMMXT_wCGR3:
1894 gen_op_iwmmxt_set_cup();
1895 tmp = load_reg(s, rd);
1896 iwmmxt_store_creg(wrd, tmp);
1902 case 0x100: /* WXOR */
1903 wrd = (insn >> 12) & 0xf;
1904 rd0 = (insn >> 0) & 0xf;
1905 rd1 = (insn >> 16) & 0xf;
1906 gen_op_iwmmxt_movq_M0_wRn(rd0);
1907 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1908 gen_op_iwmmxt_setpsr_nz();
1909 gen_op_iwmmxt_movq_wRn_M0(wrd);
1910 gen_op_iwmmxt_set_mup();
1911 gen_op_iwmmxt_set_cup();
1913 case 0x111: /* TMRC */
1916 rd = (insn >> 12) & 0xf;
1917 wrd = (insn >> 16) & 0xf;
1918 tmp = iwmmxt_load_creg(wrd);
1919 store_reg(s, rd, tmp);
1921 case 0x300: /* WANDN */
1922 wrd = (insn >> 12) & 0xf;
1923 rd0 = (insn >> 0) & 0xf;
1924 rd1 = (insn >> 16) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0);
1926 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1927 gen_op_iwmmxt_andq_M0_wRn(rd1);
1928 gen_op_iwmmxt_setpsr_nz();
1929 gen_op_iwmmxt_movq_wRn_M0(wrd);
1930 gen_op_iwmmxt_set_mup();
1931 gen_op_iwmmxt_set_cup();
1933 case 0x200: /* WAND */
1934 wrd = (insn >> 12) & 0xf;
1935 rd0 = (insn >> 0) & 0xf;
1936 rd1 = (insn >> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0);
1938 gen_op_iwmmxt_andq_M0_wRn(rd1);
1939 gen_op_iwmmxt_setpsr_nz();
1940 gen_op_iwmmxt_movq_wRn_M0(wrd);
1941 gen_op_iwmmxt_set_mup();
1942 gen_op_iwmmxt_set_cup();
1944 case 0x810: case 0xa10: /* WMADD */
1945 wrd = (insn >> 12) & 0xf;
1946 rd0 = (insn >> 0) & 0xf;
1947 rd1 = (insn >> 16) & 0xf;
1948 gen_op_iwmmxt_movq_M0_wRn(rd0);
1949 if (insn & (1 << 21))
1950 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1952 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1956 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1957 wrd = (insn >> 12) & 0xf;
1958 rd0 = (insn >> 16) & 0xf;
1959 rd1 = (insn >> 0) & 0xf;
1960 gen_op_iwmmxt_movq_M0_wRn(rd0);
1961 switch ((insn >> 22) & 3) {
1963 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1966 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1969 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1974 gen_op_iwmmxt_movq_wRn_M0(wrd);
1975 gen_op_iwmmxt_set_mup();
1976 gen_op_iwmmxt_set_cup();
1978 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1979 wrd = (insn >> 12) & 0xf;
1980 rd0 = (insn >> 16) & 0xf;
1981 rd1 = (insn >> 0) & 0xf;
1982 gen_op_iwmmxt_movq_M0_wRn(rd0);
1983 switch ((insn >> 22) & 3) {
1985 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1988 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1991 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
2000 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2001 wrd = (insn >> 12) & 0xf;
2002 rd0 = (insn >> 16) & 0xf;
2003 rd1 = (insn >> 0) & 0xf;
2004 gen_op_iwmmxt_movq_M0_wRn(rd0);
2005 if (insn & (1 << 22))
2006 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2008 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2009 if (!(insn & (1 << 20)))
2010 gen_op_iwmmxt_addl_M0_wRn(wrd);
2011 gen_op_iwmmxt_movq_wRn_M0(wrd);
2012 gen_op_iwmmxt_set_mup();
2014 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2015 wrd = (insn >> 12) & 0xf;
2016 rd0 = (insn >> 16) & 0xf;
2017 rd1 = (insn >> 0) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0);
2019 if (insn & (1 << 21)) {
2020 if (insn & (1 << 20))
2021 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2023 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2025 if (insn & (1 << 20))
2026 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2028 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2030 gen_op_iwmmxt_movq_wRn_M0(wrd);
2031 gen_op_iwmmxt_set_mup();
2033 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2034 wrd = (insn >> 12) & 0xf;
2035 rd0 = (insn >> 16) & 0xf;
2036 rd1 = (insn >> 0) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 if (insn & (1 << 21))
2039 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2041 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2042 if (!(insn & (1 << 20))) {
2043 iwmmxt_load_reg(cpu_V1, wrd);
2044 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2049 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2050 wrd = (insn >> 12) & 0xf;
2051 rd0 = (insn >> 16) & 0xf;
2052 rd1 = (insn >> 0) & 0xf;
2053 gen_op_iwmmxt_movq_M0_wRn(rd0);
2054 switch ((insn >> 22) & 3) {
2056 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2059 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2062 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2067 gen_op_iwmmxt_movq_wRn_M0(wrd);
2068 gen_op_iwmmxt_set_mup();
2069 gen_op_iwmmxt_set_cup();
2071 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2072 wrd = (insn >> 12) & 0xf;
2073 rd0 = (insn >> 16) & 0xf;
2074 rd1 = (insn >> 0) & 0xf;
2075 gen_op_iwmmxt_movq_M0_wRn(rd0);
2076 if (insn & (1 << 22)) {
2077 if (insn & (1 << 20))
2078 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2080 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2082 if (insn & (1 << 20))
2083 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2085 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2091 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2092 wrd = (insn >> 12) & 0xf;
2093 rd0 = (insn >> 16) & 0xf;
2094 rd1 = (insn >> 0) & 0xf;
2095 gen_op_iwmmxt_movq_M0_wRn(rd0);
2096 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2097 tcg_gen_andi_i32(tmp, tmp, 7);
2098 iwmmxt_load_reg(cpu_V1, rd1);
2099 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2100 tcg_temp_free_i32(tmp);
2101 gen_op_iwmmxt_movq_wRn_M0(wrd);
2102 gen_op_iwmmxt_set_mup();
2104 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2105 if (((insn >> 6) & 3) == 3)
2107 rd = (insn >> 12) & 0xf;
2108 wrd = (insn >> 16) & 0xf;
2109 tmp = load_reg(s, rd);
2110 gen_op_iwmmxt_movq_M0_wRn(wrd);
2111 switch ((insn >> 6) & 3) {
2113 tmp2 = tcg_const_i32(0xff);
2114 tmp3 = tcg_const_i32((insn & 7) << 3);
2117 tmp2 = tcg_const_i32(0xffff);
2118 tmp3 = tcg_const_i32((insn & 3) << 4);
2121 tmp2 = tcg_const_i32(0xffffffff);
2122 tmp3 = tcg_const_i32((insn & 1) << 5);
2128 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2129 tcg_temp_free_i32(tmp3);
2130 tcg_temp_free_i32(tmp2);
2131 tcg_temp_free_i32(tmp);
2132 gen_op_iwmmxt_movq_wRn_M0(wrd);
2133 gen_op_iwmmxt_set_mup();
2135 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2136 rd = (insn >> 12) & 0xf;
2137 wrd = (insn >> 16) & 0xf;
2138 if (rd == 15 || ((insn >> 22) & 3) == 3)
2140 gen_op_iwmmxt_movq_M0_wRn(wrd);
2141 tmp = tcg_temp_new_i32();
2142 switch ((insn >> 22) & 3) {
2144 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2145 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2147 tcg_gen_ext8s_i32(tmp, tmp);
2149 tcg_gen_andi_i32(tmp, tmp, 0xff);
2153 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2154 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2156 tcg_gen_ext16s_i32(tmp, tmp);
2158 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2162 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2163 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2166 store_reg(s, rd, tmp);
2168 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2169 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2171 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2172 switch ((insn >> 22) & 3) {
2174 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2177 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2180 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2183 tcg_gen_shli_i32(tmp, tmp, 28);
2185 tcg_temp_free_i32(tmp);
2187 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2188 if (((insn >> 6) & 3) == 3)
2190 rd = (insn >> 12) & 0xf;
2191 wrd = (insn >> 16) & 0xf;
2192 tmp = load_reg(s, rd);
2193 switch ((insn >> 6) & 3) {
2195 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2198 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2201 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2204 tcg_temp_free_i32(tmp);
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2208 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2209 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2211 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2212 tmp2 = tcg_temp_new_i32();
2213 tcg_gen_mov_i32(tmp2, tmp);
2214 switch ((insn >> 22) & 3) {
2216 for (i = 0; i < 7; i ++) {
2217 tcg_gen_shli_i32(tmp2, tmp2, 4);
2218 tcg_gen_and_i32(tmp, tmp, tmp2);
2222 for (i = 0; i < 3; i ++) {
2223 tcg_gen_shli_i32(tmp2, tmp2, 8);
2224 tcg_gen_and_i32(tmp, tmp, tmp2);
2228 tcg_gen_shli_i32(tmp2, tmp2, 16);
2229 tcg_gen_and_i32(tmp, tmp, tmp2);
2233 tcg_temp_free_i32(tmp2);
2234 tcg_temp_free_i32(tmp);
2236 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0);
2240 switch ((insn >> 22) & 3) {
2242 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2245 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2248 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2253 gen_op_iwmmxt_movq_wRn_M0(wrd);
2254 gen_op_iwmmxt_set_mup();
2256 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2257 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2259 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2260 tmp2 = tcg_temp_new_i32();
2261 tcg_gen_mov_i32(tmp2, tmp);
2262 switch ((insn >> 22) & 3) {
2264 for (i = 0; i < 7; i ++) {
2265 tcg_gen_shli_i32(tmp2, tmp2, 4);
2266 tcg_gen_or_i32(tmp, tmp, tmp2);
2270 for (i = 0; i < 3; i ++) {
2271 tcg_gen_shli_i32(tmp2, tmp2, 8);
2272 tcg_gen_or_i32(tmp, tmp, tmp2);
2276 tcg_gen_shli_i32(tmp2, tmp2, 16);
2277 tcg_gen_or_i32(tmp, tmp, tmp2);
2281 tcg_temp_free_i32(tmp2);
2282 tcg_temp_free_i32(tmp);
2284 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2285 rd = (insn >> 12) & 0xf;
2286 rd0 = (insn >> 16) & 0xf;
2287 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
2290 tmp = tcg_temp_new_i32();
2291 switch ((insn >> 22) & 3) {
2293 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2296 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2299 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2302 store_reg(s, rd, tmp);
2304 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2305 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2306 wrd = (insn >> 12) & 0xf;
2307 rd0 = (insn >> 16) & 0xf;
2308 rd1 = (insn >> 0) & 0xf;
2309 gen_op_iwmmxt_movq_M0_wRn(rd0);
2310 switch ((insn >> 22) & 3) {
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2315 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2321 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2324 if (insn & (1 << 21))
2325 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2327 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2332 gen_op_iwmmxt_movq_wRn_M0(wrd);
2333 gen_op_iwmmxt_set_mup();
2334 gen_op_iwmmxt_set_cup();
2336 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2337 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2338 wrd = (insn >> 12) & 0xf;
2339 rd0 = (insn >> 16) & 0xf;
2340 gen_op_iwmmxt_movq_M0_wRn(rd0);
2341 switch ((insn >> 22) & 3) {
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_unpacklsb_M0();
2346 gen_op_iwmmxt_unpacklub_M0();
2349 if (insn & (1 << 21))
2350 gen_op_iwmmxt_unpacklsw_M0();
2352 gen_op_iwmmxt_unpackluw_M0();
2355 if (insn & (1 << 21))
2356 gen_op_iwmmxt_unpacklsl_M0();
2358 gen_op_iwmmxt_unpacklul_M0();
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2367 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2368 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2369 wrd = (insn >> 12) & 0xf;
2370 rd0 = (insn >> 16) & 0xf;
2371 gen_op_iwmmxt_movq_M0_wRn(rd0);
2372 switch ((insn >> 22) & 3) {
2374 if (insn & (1 << 21))
2375 gen_op_iwmmxt_unpackhsb_M0();
2377 gen_op_iwmmxt_unpackhub_M0();
2380 if (insn & (1 << 21))
2381 gen_op_iwmmxt_unpackhsw_M0();
2383 gen_op_iwmmxt_unpackhuw_M0();
2386 if (insn & (1 << 21))
2387 gen_op_iwmmxt_unpackhsl_M0();
2389 gen_op_iwmmxt_unpackhul_M0();
2394 gen_op_iwmmxt_movq_wRn_M0(wrd);
2395 gen_op_iwmmxt_set_mup();
2396 gen_op_iwmmxt_set_cup();
2398 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2399 case 0x214: case 0x614: case 0xa14: case 0xe14:
2400 if (((insn >> 22) & 3) == 0)
2402 wrd = (insn >> 12) & 0xf;
2403 rd0 = (insn >> 16) & 0xf;
2404 gen_op_iwmmxt_movq_M0_wRn(rd0);
2405 tmp = tcg_temp_new_i32();
2406 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2407 tcg_temp_free_i32(tmp);
2410 switch ((insn >> 22) & 3) {
2412 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2415 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2418 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2421 tcg_temp_free_i32(tmp);
2422 gen_op_iwmmxt_movq_wRn_M0(wrd);
2423 gen_op_iwmmxt_set_mup();
2424 gen_op_iwmmxt_set_cup();
2426 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2427 case 0x014: case 0x414: case 0x814: case 0xc14:
2428 if (((insn >> 22) & 3) == 0)
2430 wrd = (insn >> 12) & 0xf;
2431 rd0 = (insn >> 16) & 0xf;
2432 gen_op_iwmmxt_movq_M0_wRn(rd0);
2433 tmp = tcg_temp_new_i32();
2434 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2435 tcg_temp_free_i32(tmp);
2438 switch ((insn >> 22) & 3) {
2440 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2443 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2446 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2449 tcg_temp_free_i32(tmp);
2450 gen_op_iwmmxt_movq_wRn_M0(wrd);
2451 gen_op_iwmmxt_set_mup();
2452 gen_op_iwmmxt_set_cup();
2454 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2455 case 0x114: case 0x514: case 0x914: case 0xd14:
2456 if (((insn >> 22) & 3) == 0)
2458 wrd = (insn >> 12) & 0xf;
2459 rd0 = (insn >> 16) & 0xf;
2460 gen_op_iwmmxt_movq_M0_wRn(rd0);
2461 tmp = tcg_temp_new_i32();
2462 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2463 tcg_temp_free_i32(tmp);
2466 switch ((insn >> 22) & 3) {
2468 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2471 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2474 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2477 tcg_temp_free_i32(tmp);
2478 gen_op_iwmmxt_movq_wRn_M0(wrd);
2479 gen_op_iwmmxt_set_mup();
2480 gen_op_iwmmxt_set_cup();
2482 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2483 case 0x314: case 0x714: case 0xb14: case 0xf14:
2484 if (((insn >> 22) & 3) == 0)
2486 wrd = (insn >> 12) & 0xf;
2487 rd0 = (insn >> 16) & 0xf;
2488 gen_op_iwmmxt_movq_M0_wRn(rd0);
2489 tmp = tcg_temp_new_i32();
2490 switch ((insn >> 22) & 3) {
2492 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2493 tcg_temp_free_i32(tmp);
2496 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2499 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2500 tcg_temp_free_i32(tmp);
2503 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2506 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2507 tcg_temp_free_i32(tmp);
2510 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2513 tcg_temp_free_i32(tmp);
2514 gen_op_iwmmxt_movq_wRn_M0(wrd);
2515 gen_op_iwmmxt_set_mup();
2516 gen_op_iwmmxt_set_cup();
2518 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2519 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2520 wrd = (insn >> 12) & 0xf;
2521 rd0 = (insn >> 16) & 0xf;
2522 rd1 = (insn >> 0) & 0xf;
2523 gen_op_iwmmxt_movq_M0_wRn(rd0);
2524 switch ((insn >> 22) & 3) {
2526 if (insn & (1 << 21))
2527 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2529 gen_op_iwmmxt_minub_M0_wRn(rd1);
2532 if (insn & (1 << 21))
2533 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2535 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2538 if (insn & (1 << 21))
2539 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2541 gen_op_iwmmxt_minul_M0_wRn(rd1);
2546 gen_op_iwmmxt_movq_wRn_M0(wrd);
2547 gen_op_iwmmxt_set_mup();
2549 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2550 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2551 wrd = (insn >> 12) & 0xf;
2552 rd0 = (insn >> 16) & 0xf;
2553 rd1 = (insn >> 0) & 0xf;
2554 gen_op_iwmmxt_movq_M0_wRn(rd0);
2555 switch ((insn >> 22) & 3) {
2557 if (insn & (1 << 21))
2558 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2560 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2563 if (insn & (1 << 21))
2564 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2566 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2569 if (insn & (1 << 21))
2570 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2572 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2577 gen_op_iwmmxt_movq_wRn_M0(wrd);
2578 gen_op_iwmmxt_set_mup();
2580 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2581 case 0x402: case 0x502: case 0x602: case 0x702:
2582 wrd = (insn >> 12) & 0xf;
2583 rd0 = (insn >> 16) & 0xf;
2584 rd1 = (insn >> 0) & 0xf;
2585 gen_op_iwmmxt_movq_M0_wRn(rd0);
2586 tmp = tcg_const_i32((insn >> 20) & 3);
2587 iwmmxt_load_reg(cpu_V1, rd1);
2588 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2589 tcg_temp_free_i32(tmp);
2590 gen_op_iwmmxt_movq_wRn_M0(wrd);
2591 gen_op_iwmmxt_set_mup();
2593 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2594 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2595 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2596 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2597 wrd = (insn >> 12) & 0xf;
2598 rd0 = (insn >> 16) & 0xf;
2599 rd1 = (insn >> 0) & 0xf;
2600 gen_op_iwmmxt_movq_M0_wRn(rd0);
2601 switch ((insn >> 20) & 0xf) {
2603 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2606 gen_op_iwmmxt_subub_M0_wRn(rd1);
2609 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2612 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2615 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2618 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2621 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2624 gen_op_iwmmxt_subul_M0_wRn(rd1);
2627 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2632 gen_op_iwmmxt_movq_wRn_M0(wrd);
2633 gen_op_iwmmxt_set_mup();
2634 gen_op_iwmmxt_set_cup();
2636 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2637 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2638 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2639 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2640 wrd = (insn >> 12) & 0xf;
2641 rd0 = (insn >> 16) & 0xf;
2642 gen_op_iwmmxt_movq_M0_wRn(rd0);
2643 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2644 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2645 tcg_temp_free_i32(tmp);
2646 gen_op_iwmmxt_movq_wRn_M0(wrd);
2647 gen_op_iwmmxt_set_mup();
2648 gen_op_iwmmxt_set_cup();
2650 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2651 case 0x418: case 0x518: case 0x618: case 0x718:
2652 case 0x818: case 0x918: case 0xa18: case 0xb18:
2653 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2654 wrd = (insn >> 12) & 0xf;
2655 rd0 = (insn >> 16) & 0xf;
2656 rd1 = (insn >> 0) & 0xf;
2657 gen_op_iwmmxt_movq_M0_wRn(rd0);
2658 switch ((insn >> 20) & 0xf) {
2660 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2663 gen_op_iwmmxt_addub_M0_wRn(rd1);
2666 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2669 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2672 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2675 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2678 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2681 gen_op_iwmmxt_addul_M0_wRn(rd1);
2684 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2689 gen_op_iwmmxt_movq_wRn_M0(wrd);
2690 gen_op_iwmmxt_set_mup();
2691 gen_op_iwmmxt_set_cup();
2693 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2694 case 0x408: case 0x508: case 0x608: case 0x708:
2695 case 0x808: case 0x908: case 0xa08: case 0xb08:
2696 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2697 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2699 wrd = (insn >> 12) & 0xf;
2700 rd0 = (insn >> 16) & 0xf;
2701 rd1 = (insn >> 0) & 0xf;
2702 gen_op_iwmmxt_movq_M0_wRn(rd0);
2703 switch ((insn >> 22) & 3) {
2705 if (insn & (1 << 21))
2706 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2708 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2711 if (insn & (1 << 21))
2712 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2714 gen_op_iwmmxt_packul_M0_wRn(rd1);
2717 if (insn & (1 << 21))
2718 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2720 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2723 gen_op_iwmmxt_movq_wRn_M0(wrd);
2724 gen_op_iwmmxt_set_mup();
2725 gen_op_iwmmxt_set_cup();
2727 case 0x201: case 0x203: case 0x205: case 0x207:
2728 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2729 case 0x211: case 0x213: case 0x215: case 0x217:
2730 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2731 wrd = (insn >> 5) & 0xf;
2732 rd0 = (insn >> 12) & 0xf;
2733 rd1 = (insn >> 0) & 0xf;
2734 if (rd0 == 0xf || rd1 == 0xf)
2736 gen_op_iwmmxt_movq_M0_wRn(wrd);
2737 tmp = load_reg(s, rd0);
2738 tmp2 = load_reg(s, rd1);
2739 switch ((insn >> 16) & 0xf) {
2740 case 0x0: /* TMIA */
2741 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2743 case 0x8: /* TMIAPH */
2744 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2746 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2747 if (insn & (1 << 16))
2748 tcg_gen_shri_i32(tmp, tmp, 16);
2749 if (insn & (1 << 17))
2750 tcg_gen_shri_i32(tmp2, tmp2, 16);
2751 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2754 tcg_temp_free_i32(tmp2);
2755 tcg_temp_free_i32(tmp);
2758 tcg_temp_free_i32(tmp2);
2759 tcg_temp_free_i32(tmp);
2760 gen_op_iwmmxt_movq_wRn_M0(wrd);
2761 gen_op_iwmmxt_set_mup();
2770 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2771 (i.e. an undefined instruction). */
2772 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2774 int acc, rd0, rd1, rdhi, rdlo;
2777 if ((insn & 0x0ff00f10) == 0x0e200010) {
2778 /* Multiply with Internal Accumulate Format */
2779 rd0 = (insn >> 12) & 0xf;
2781 acc = (insn >> 5) & 7;
2786 tmp = load_reg(s, rd0);
2787 tmp2 = load_reg(s, rd1);
2788 switch ((insn >> 16) & 0xf) {
2790 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2792 case 0x8: /* MIAPH */
2793 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2795 case 0xc: /* MIABB */
2796 case 0xd: /* MIABT */
2797 case 0xe: /* MIATB */
2798 case 0xf: /* MIATT */
2799 if (insn & (1 << 16))
2800 tcg_gen_shri_i32(tmp, tmp, 16);
2801 if (insn & (1 << 17))
2802 tcg_gen_shri_i32(tmp2, tmp2, 16);
2803 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2808 tcg_temp_free_i32(tmp2);
2809 tcg_temp_free_i32(tmp);
2811 gen_op_iwmmxt_movq_wRn_M0(acc);
2815 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2816 /* Internal Accumulator Access Format */
2817 rdhi = (insn >> 16) & 0xf;
2818 rdlo = (insn >> 12) & 0xf;
2824 if (insn & ARM_CP_RW_BIT) { /* MRA */
2825 iwmmxt_load_reg(cpu_V0, acc);
2826 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2827 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2828 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2829 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2831 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2832 iwmmxt_store_reg(cpu_V0, acc);
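/* Helpers for extracting VFP register numbers from an instruction encoding:
 * the SREG forms give single-precision register numbers, the DREG forms
 * double-precision ones, with the D/N/M suffix naming the operand field.
 */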
2840 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2841 #define VFP_SREG(insn, bigbit, smallbit) \
2842 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2843 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2844 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2845 reg = (((insn) >> (bigbit)) & 0x0f) \
2846 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2848 if (insn & (1 << (smallbit))) \
2850 reg = ((insn) >> (bigbit)) & 0x0f; \
2853 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2854 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2855 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2856 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2857 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2858 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
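/* Duplicate the low (or, below, the high) 16 bits of var across both
 * halves of the 32-bit value, for use with 16-bit scalar operands.
 */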
2860 static void gen_neon_dup_low16(TCGv_i32 var)
2862 TCGv_i32 tmp = tcg_temp_new_i32();
2863 tcg_gen_ext16u_i32(var, var);
2864 tcg_gen_shli_i32(tmp, var, 16);
2865 tcg_gen_or_i32(var, var, tmp);
2866 tcg_temp_free_i32(tmp);
2869 static void gen_neon_dup_high16(TCGv_i32 var)
2871 TCGv_i32 tmp = tcg_temp_new_i32();
2872 tcg_gen_andi_i32(var, var, 0xffff0000);
2873 tcg_gen_shri_i32(tmp, var, 16);
2874 tcg_gen_or_i32(var, var, tmp);
2875 tcg_temp_free_i32(tmp);
2879 * Disassemble a VFP instruction. Returns nonzero if an error occurred
2880 * (i.e. an undefined instruction).
2882 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
2884 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
2889 * If the decodetree decoder handles this insn it will always
2890 * emit code to either execute the insn or generate an appropriate
2891 * exception; so we don't need to ever return non-zero to tell
2892 * the calling code to emit an UNDEF exception.
2894 if (extract32(insn, 28, 4) == 0xf) {
2895 if (disas_vfp_uncond(s, insn)) {
2899 if (disas_vfp(s, insn)) {
2903 /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
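/* For softmmu, direct block chaining is only allowed when the destination
 * lies in the same guest page as the start of this TB or as the current
 * instruction; otherwise the caller must use an indirect jump.
 */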
2907 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
2909 #ifndef CONFIG_USER_ONLY
2910 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
2911 ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
2917 static void gen_goto_ptr(void)
2919 tcg_gen_lookup_and_goto_ptr();
2922 /* This will end the TB but doesn't guarantee we'll return to
2923 * cpu_loop_exec. Any live exit_requests will be processed as we
2924 * enter the next TB.
2926 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
2928 if (use_goto_tb(s, dest)) {
2930 gen_set_pc_im(s, dest);
2931 tcg_gen_exit_tb(s->base.tb, n);
2933 gen_set_pc_im(s, dest);
2936 s->base.is_jmp = DISAS_NORETURN;
2939 static inline void gen_jmp (DisasContext *s, uint32_t dest)
2941 if (unlikely(is_singlestepping(s))) {
2942 /* An indirect jump so that we still trigger the debug exception. */
2947 gen_goto_tb(s, 0, dest);
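/* Signed 16x16->32 multiply, taking the top or bottom half of each
 * operand according to x and y (as used by the SMULxy/SMLAxy family).
 */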
2951 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2954 tcg_gen_sari_i32(t0, t0, 16);
2958 tcg_gen_sari_i32(t1, t1, 16);
2961 tcg_gen_mul_i32(t0, t0, t1);
2964 /* Return the mask of PSR bits set by a MSR instruction. */
2965 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2970 if (flags & (1 << 0))
2972 if (flags & (1 << 1))
2974 if (flags & (1 << 2))
2976 if (flags & (1 << 3))
2979 /* Mask out undefined bits. */
2980 mask &= ~CPSR_RESERVED;
2981 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
2984 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
2985 mask &= ~CPSR_Q; /* V5TE in reality */
2987 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
2988 mask &= ~(CPSR_E | CPSR_GE);
2990 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
2993 /* Mask out execution state and reserved bits. */
2995 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
2997 /* Mask out privileged bits. */
3003 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3004 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
3008 /* ??? This is also undefined in system mode. */
3012 tmp = load_cpu_field(spsr);
3013 tcg_gen_andi_i32(tmp, tmp, ~mask);
3014 tcg_gen_andi_i32(t0, t0, mask);
3015 tcg_gen_or_i32(tmp, tmp, t0);
3016 store_cpu_field(tmp, spsr);
3018 gen_set_cpsr(t0, mask);
3020 tcg_temp_free_i32(t0);
3025 /* Returns nonzero if access to the PSR is not permitted. */
3026 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3029 tmp = tcg_temp_new_i32();
3030 tcg_gen_movi_i32(tmp, val);
3031 return gen_set_psr(s, mask, spsr, tmp);
3034 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
3035 int *tgtmode, int *regno)
3037 /* Decode the r and sysm fields of MSR/MRS banked accesses into
3038 * the target mode and register number, and identify the various
3039 * unpredictable cases.
3040 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
3041 * + executed in user mode
3042 * + using R15 as the src/dest register
3043 * + accessing an unimplemented register
3044 * + accessing a register that's inaccessible at current PL/security state*
3045 * + accessing a register that you could access with a different insn
3046 * We choose to UNDEF in all these cases.
3047 * Since we don't know which of the various AArch32 modes we are in
3048 * we have to defer some checks to runtime.
3049 * Accesses to Monitor mode registers from Secure EL1 (which implies
3050 * that EL3 is AArch64) must trap to EL3.
3052 * If the access checks fail this function will emit code to take
3053 * an exception and return false. Otherwise it will return true,
3054 * and set *tgtmode and *regno appropriately.
3056 int exc_target = default_exception_el(s);
3058 /* These instructions are present only in ARMv8, or in ARMv7 with the
3059 * Virtualization Extensions.
3061 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
3062 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
3066 if (IS_USER(s) || rn == 15) {
3070 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
3071 * of registers into (r, sysm).
3074 /* SPSRs for other modes */
3076 case 0xe: /* SPSR_fiq */
3077 *tgtmode = ARM_CPU_MODE_FIQ;
3079 case 0x10: /* SPSR_irq */
3080 *tgtmode = ARM_CPU_MODE_IRQ;
3082 case 0x12: /* SPSR_svc */
3083 *tgtmode = ARM_CPU_MODE_SVC;
3085 case 0x14: /* SPSR_abt */
3086 *tgtmode = ARM_CPU_MODE_ABT;
3088 case 0x16: /* SPSR_und */
3089 *tgtmode = ARM_CPU_MODE_UND;
3091 case 0x1c: /* SPSR_mon */
3092 *tgtmode = ARM_CPU_MODE_MON;
3094 case 0x1e: /* SPSR_hyp */
3095 *tgtmode = ARM_CPU_MODE_HYP;
3097 default: /* unallocated */
3100 /* We arbitrarily assign SPSR a register number of 16. */
3103 /* general purpose registers for other modes */
3105 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
3106 *tgtmode = ARM_CPU_MODE_USR;
3109 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
3110 *tgtmode = ARM_CPU_MODE_FIQ;
3113 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
3114 *tgtmode = ARM_CPU_MODE_IRQ;
3115 *regno = sysm & 1 ? 13 : 14;
3117 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
3118 *tgtmode = ARM_CPU_MODE_SVC;
3119 *regno = sysm & 1 ? 13 : 14;
3121 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
3122 *tgtmode = ARM_CPU_MODE_ABT;
3123 *regno = sysm & 1 ? 13 : 14;
3125 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
3126 *tgtmode = ARM_CPU_MODE_UND;
3127 *regno = sysm & 1 ? 13 : 14;
3129 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
3130 *tgtmode = ARM_CPU_MODE_MON;
3131 *regno = sysm & 1 ? 13 : 14;
3133 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
3134 *tgtmode = ARM_CPU_MODE_HYP;
3135 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
3136 *regno = sysm & 1 ? 13 : 17;
3138 default: /* unallocated */
3143 /* Catch the 'accessing inaccessible register' cases we can detect
3144 * at translate time.
3147 case ARM_CPU_MODE_MON:
3148 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
3151 if (s->current_el == 1) {
3152 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
3153 * then accesses to Mon registers trap to EL3
3159 case ARM_CPU_MODE_HYP:
3161 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
3162 * (and so we can forbid accesses from EL2 or below). elr_hyp
3163 * can be accessed also from Hyp mode, so forbid accesses from
3166 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
3167 (s->current_el < 3 && *regno != 17)) {
3178 /* If we get here then some access check did not pass */
3179 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
3180 syn_uncategorized(), exc_target);
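/* MSR (banked): the target mode is not known until runtime, so the
 * actual banked register write is performed by a helper.
 */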
3184 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
3186 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3187 int tgtmode = 0, regno = 0;
3189 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3193 /* Sync state because msr_banked() can raise exceptions */
3194 gen_set_condexec(s);
3195 gen_set_pc_im(s, s->pc_curr);
3196 tcg_reg = load_reg(s, rn);
3197 tcg_tgtmode = tcg_const_i32(tgtmode);
3198 tcg_regno = tcg_const_i32(regno);
3199 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
3200 tcg_temp_free_i32(tcg_tgtmode);
3201 tcg_temp_free_i32(tcg_regno);
3202 tcg_temp_free_i32(tcg_reg);
3203 s->base.is_jmp = DISAS_UPDATE;
3206 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
3208 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3209 int tgtmode = 0, regno = 0;
3211 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3215 /* Sync state because mrs_banked() can raise exceptions */
3216 gen_set_condexec(s);
3217 gen_set_pc_im(s, s->pc_curr);
3218 tcg_reg = tcg_temp_new_i32();
3219 tcg_tgtmode = tcg_const_i32(tgtmode);
3220 tcg_regno = tcg_const_i32(regno);
3221 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
3222 tcg_temp_free_i32(tcg_tgtmode);
3223 tcg_temp_free_i32(tcg_regno);
3224 store_reg(s, rn, tcg_reg);
3225 s->base.is_jmp = DISAS_UPDATE;
3228 /* Store value to PC as for an exception return (i.e. don't
3229 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
3230 * will do the masking based on the new value of the Thumb bit.
3232 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
3234 tcg_gen_mov_i32(cpu_R[15], pc);
3235 tcg_temp_free_i32(pc);
3238 /* Generate a v6 exception return. Marks both values as dead. */
3239 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
3241 store_pc_exc_ret(s, pc);
3242 /* The cpsr_write_eret helper will mask the low bits of PC
3243 * appropriately depending on the new Thumb bit, so it must
3244 * be called after storing the new PC.
3246 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3249 gen_helper_cpsr_write_eret(cpu_env, cpsr);
3250 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3253 tcg_temp_free_i32(cpsr);
3254 /* Must exit loop to check un-masked IRQs */
3255 s->base.is_jmp = DISAS_EXIT;
3258 /* Generate an old-style exception return. Marks pc as dead. */
3259 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
3261 gen_rfe(s, pc, load_cpu_field(spsr));
3265 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
3266 * only call the helper when running single threaded TCG code to ensure
3267 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
3268 * just skip this instruction. Currently the SEV/SEVL instructions
3269 * which are *one* of many ways to wake the CPU from WFE are not
3270 * implemented so we can't sleep like WFI does.
3272 static void gen_nop_hint(DisasContext *s, int val)
3275 /* When running in MTTCG we don't generate jumps to the yield and
3276 * WFE helpers as it won't affect the scheduling of other vCPUs.
3277 * If we wanted to more completely model WFE/SEV so we don't busy
3278 * spin unnecessarily we would need to do something more involved.
3281 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3282 gen_set_pc_im(s, s->base.pc_next);
3283 s->base.is_jmp = DISAS_YIELD;
3287 gen_set_pc_im(s, s->base.pc_next);
3288 s->base.is_jmp = DISAS_WFI;
3291 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3292 gen_set_pc_im(s, s->base.pc_next);
3293 s->base.is_jmp = DISAS_WFE;
3298 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
3304 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3306 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
3309 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3310 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3311 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3316 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
3319 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3320 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3321 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3326 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3327 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
3328 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
3329 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
3330 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
3332 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3333 switch ((size << 1) | u) { \
3335 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3338 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3341 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3344 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3347 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3350 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3352 default: return 1; \
3355 #define GEN_NEON_INTEGER_OP(name) do { \
3356 switch ((size << 1) | u) { \
3358 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3361 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3364 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3367 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3370 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3373 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3375 default: return 1; \
3378 static TCGv_i32 neon_load_scratch(int scratch)
3380 TCGv_i32 tmp = tcg_temp_new_i32();
3381 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3385 static void neon_store_scratch(int scratch, TCGv_i32 var)
3387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3388 tcg_temp_free_i32(var);
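/* Return the scalar operand for a Neon by-scalar operation: a 16-bit
 * element is duplicated into both halves of the returned 32-bit value.
 */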
3391 static inline TCGv_i32 neon_get_scalar(int size, int reg)
3395 tmp = neon_load_reg(reg & 7, reg >> 4);
3397 gen_neon_dup_high16(tmp);
3399 gen_neon_dup_low16(tmp);
3402 tmp = neon_load_reg(reg & 15, reg >> 4);
3407 static int gen_neon_unzip(int rd, int rm, int size, int q)
3411 if (!q && size == 2) {
3414 pd = vfp_reg_ptr(true, rd);
3415 pm = vfp_reg_ptr(true, rm);
3419 gen_helper_neon_qunzip8(pd, pm);
3422 gen_helper_neon_qunzip16(pd, pm);
3425 gen_helper_neon_qunzip32(pd, pm);
3433 gen_helper_neon_unzip8(pd, pm);
3436 gen_helper_neon_unzip16(pd, pm);
3442 tcg_temp_free_ptr(pd);
3443 tcg_temp_free_ptr(pm);
3447 static int gen_neon_zip(int rd, int rm, int size, int q)
3451 if (!q && size == 2) {
3454 pd = vfp_reg_ptr(true, rd);
3455 pm = vfp_reg_ptr(true, rm);
3459 gen_helper_neon_qzip8(pd, pm);
3462 gen_helper_neon_qzip16(pd, pm);
3465 gen_helper_neon_qzip32(pd, pm);
3473 gen_helper_neon_zip8(pd, pm);
3476 gen_helper_neon_zip16(pd, pm);
3482 tcg_temp_free_ptr(pd);
3483 tcg_temp_free_ptr(pm);
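/* Transpose byte (and, below, halfword) lanes between t0 and t1,
 * operating on one 32-bit chunk of the vector at a time.
 */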
3487 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
3491 rd = tcg_temp_new_i32();
3492 tmp = tcg_temp_new_i32();
3494 tcg_gen_shli_i32(rd, t0, 8);
3495 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3496 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3497 tcg_gen_or_i32(rd, rd, tmp);
3499 tcg_gen_shri_i32(t1, t1, 8);
3500 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3501 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3502 tcg_gen_or_i32(t1, t1, tmp);
3503 tcg_gen_mov_i32(t0, rd);
3505 tcg_temp_free_i32(tmp);
3506 tcg_temp_free_i32(rd);
3509 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
3513 rd = tcg_temp_new_i32();
3514 tmp = tcg_temp_new_i32();
3516 tcg_gen_shli_i32(rd, t0, 16);
3517 tcg_gen_andi_i32(tmp, t1, 0xffff);
3518 tcg_gen_or_i32(rd, rd, tmp);
3519 tcg_gen_shri_i32(t1, t1, 16);
3520 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3521 tcg_gen_or_i32(t1, t1, tmp);
3522 tcg_gen_mov_i32(t0, rd);
3524 tcg_temp_free_i32(tmp);
3525 tcg_temp_free_i32(rd);
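/* For each VLD/VST "multiple structures" op value: the number of
 * registers transferred and their interleave and spacing.
 */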
3533 } const neon_ls_element_type[11] = {
3547 /* Translate a NEON load/store element instruction. Return nonzero if the
3548 instruction is invalid. */
3549 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
3569 /* FIXME: this access check should not take precedence over UNDEF
3570 * for invalid encodings; we will generate incorrect syndrome information
3571 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3573 if (s->fp_excp_el) {
3574 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
3575 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
3579 if (!s->vfp_enabled)
3581 VFP_DREG_D(rd, insn);
3582 rn = (insn >> 16) & 0xf;
3584 load = (insn & (1 << 21)) != 0;
3585 endian = s->be_data;
3586 mmu_idx = get_mem_index(s);
3587 if ((insn & (1 << 23)) == 0) {
3588 /* Load store all elements. */
3589 op = (insn >> 8) & 0xf;
3590 size = (insn >> 6) & 3;
3593 /* Catch UNDEF cases for bad values of align field */
3596 if (((insn >> 5) & 1) == 1) {
3601 if (((insn >> 4) & 3) == 3) {
3608 nregs = neon_ls_element_type[op].nregs;
3609 interleave = neon_ls_element_type[op].interleave;
3610 spacing = neon_ls_element_type[op].spacing;
3611 if (size == 3 && (interleave | spacing) != 1) {
3614 /* For our purposes, bytes are always little-endian. */
3618 /* Consecutive little-endian elements from a single register
3619 * can be promoted to a larger little-endian operation.
3621 if (interleave == 1 && endian == MO_LE) {
3624 tmp64 = tcg_temp_new_i64();
3625 addr = tcg_temp_new_i32();
3626 tmp2 = tcg_const_i32(1 << size);
3627 load_reg_var(s, addr, rn);
3628 for (reg = 0; reg < nregs; reg++) {
3629 for (n = 0; n < 8 >> size; n++) {
3631 for (xs = 0; xs < interleave; xs++) {
3632 int tt = rd + reg + spacing * xs;
3635 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
3636 neon_store_element64(tt, n, size, tmp64);
3638 neon_load_element64(tmp64, tt, n, size);
3639 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
3641 tcg_gen_add_i32(addr, addr, tmp2);
3645 tcg_temp_free_i32(addr);
3646 tcg_temp_free_i32(tmp2);
3647 tcg_temp_free_i64(tmp64);
3648 stride = nregs * interleave * 8;
3650 size = (insn >> 10) & 3;
3652 /* Load single element to all lanes. */
3653 int a = (insn >> 4) & 1;
3657 size = (insn >> 6) & 3;
3658 nregs = ((insn >> 8) & 3) + 1;
3661 if (nregs != 4 || a == 0) {
3664 /* For VLD4, size == 3 with a == 1 means 32 bits at 16-byte alignment */
3667 if (nregs == 1 && a == 1 && size == 0) {
3670 if (nregs == 3 && a == 1) {
3673 addr = tcg_temp_new_i32();
3674 load_reg_var(s, addr, rn);
3676 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
3677 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
3679 stride = (insn & (1 << 5)) ? 2 : 1;
3680 vec_size = nregs == 1 ? stride * 8 : 8;
3682 tmp = tcg_temp_new_i32();
3683 for (reg = 0; reg < nregs; reg++) {
3684 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
3686 if ((rd & 1) && vec_size == 16) {
3687 /* We cannot write 16 bytes at once because the
3688 * destination is unaligned.
3690 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
3692 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
3693 neon_reg_offset(rd, 0), 8, 8);
3695 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
3696 vec_size, vec_size, tmp);
3698 tcg_gen_addi_i32(addr, addr, 1 << size);
3701 tcg_temp_free_i32(tmp);
3702 tcg_temp_free_i32(addr);
3703 stride = (1 << size) * nregs;
3705 /* Single element. */
3706 int idx = (insn >> 4) & 0xf;
3710 reg_idx = (insn >> 5) & 7;
3714 reg_idx = (insn >> 6) & 3;
3715 stride = (insn & (1 << 5)) ? 2 : 1;
3718 reg_idx = (insn >> 7) & 1;
3719 stride = (insn & (1 << 6)) ? 2 : 1;
3724 nregs = ((insn >> 8) & 3) + 1;
3725 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3728 if (((idx & (1 << size)) != 0) ||
3729 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3734 if ((idx & 1) != 0) {
3739 if (size == 2 && (idx & 2) != 0) {
3744 if ((size == 2) && ((idx & 3) == 3)) {
3751 if ((rd + stride * (nregs - 1)) > 31) {
3752 /* Attempts to write off the end of the register file
3753 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3754 * the neon_load_reg() would write off the end of the array.
3758 tmp = tcg_temp_new_i32();
3759 addr = tcg_temp_new_i32();
3760 load_reg_var(s, addr, rn);
3761 for (reg = 0; reg < nregs; reg++) {
3763 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
3765 neon_store_element(rd, reg_idx, size, tmp);
3766 } else { /* Store */
3767 neon_load_element(tmp, rd, reg_idx, size);
3768 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
3772 tcg_gen_addi_i32(addr, addr, 1 << size);
3774 tcg_temp_free_i32(addr);
3775 tcg_temp_free_i32(tmp);
3776 stride = nregs * (1 << size);
3782 base = load_reg(s, rn);
3784 tcg_gen_addi_i32(base, base, stride);
3787 index = load_reg(s, rm);
3788 tcg_gen_add_i32(base, base, index);
3789 tcg_temp_free_i32(index);
3791 store_reg(s, rn, base);
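/* Narrowing (plain and saturating) and widening helpers used by the
 * size-changing Neon operations below; 'size' selects the element width.
 */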
3796 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
3799 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3800 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3801 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
3806 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3809 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3810 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3811 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3816 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
3819 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3820 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3821 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3826 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3829 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
3830 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
3831 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
3836 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
3842 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3843 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3848 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3849 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3856 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3857 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3862 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3863 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3870 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
3874 case 0: gen_helper_neon_widen_u8(dest, src); break;
3875 case 1: gen_helper_neon_widen_u16(dest, src); break;
3876 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3881 case 0: gen_helper_neon_widen_s8(dest, src); break;
3882 case 1: gen_helper_neon_widen_s16(dest, src); break;
3883 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3887 tcg_temp_free_i32(src);
3890 static inline void gen_neon_addl(int size)
3893 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3894 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3895 case 2: tcg_gen_add_i64(CPU_V001); break;
3900 static inline void gen_neon_subl(int size)
3903 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3904 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3905 case 2: tcg_gen_sub_i64(CPU_V001); break;
3910 static inline void gen_neon_negl(TCGv_i64 var, int size)
3913 case 0: gen_helper_neon_negl_u16(var, var); break;
3914 case 1: gen_helper_neon_negl_u32(var, var); break;
3916 tcg_gen_neg_i64(var, var);
3922 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
3925 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
3926 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
3931 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
3936 switch ((size << 1) | u) {
3937 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
3938 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
3939 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
3940 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
3942 tmp = gen_muls_i64_i32(a, b);
3943 tcg_gen_mov_i64(dest, tmp);
3944 tcg_temp_free_i64(tmp);
3947 tmp = gen_mulu_i64_i32(a, b);
3948 tcg_gen_mov_i64(dest, tmp);
3949 tcg_temp_free_i64(tmp);
3954 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
3955 Don't forget to clean them now. */
3957 tcg_temp_free_i32(a);
3958 tcg_temp_free_i32(b);
3962 static void gen_neon_narrow_op(int op, int u, int size,
3963 TCGv_i32 dest, TCGv_i64 src)
3967 gen_neon_unarrow_sats(size, dest, src);
3969 gen_neon_narrow(size, dest, src);
3973 gen_neon_narrow_satu(size, dest, src);
3975 gen_neon_narrow_sats(size, dest, src);
3980 /* Symbolic constants for op fields for Neon 3-register same-length.
3981 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
3984 #define NEON_3R_VHADD 0
3985 #define NEON_3R_VQADD 1
3986 #define NEON_3R_VRHADD 2
3987 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
3988 #define NEON_3R_VHSUB 4
3989 #define NEON_3R_VQSUB 5
3990 #define NEON_3R_VCGT 6
3991 #define NEON_3R_VCGE 7
3992 #define NEON_3R_VSHL 8
3993 #define NEON_3R_VQSHL 9
3994 #define NEON_3R_VRSHL 10
3995 #define NEON_3R_VQRSHL 11
3996 #define NEON_3R_VMAX 12
3997 #define NEON_3R_VMIN 13
3998 #define NEON_3R_VABD 14
3999 #define NEON_3R_VABA 15
4000 #define NEON_3R_VADD_VSUB 16
4001 #define NEON_3R_VTST_VCEQ 17
4002 #define NEON_3R_VML 18 /* VMLA, VMLS */
4003 #define NEON_3R_VMUL 19
4004 #define NEON_3R_VPMAX 20
4005 #define NEON_3R_VPMIN 21
4006 #define NEON_3R_VQDMULH_VQRDMULH 22
4007 #define NEON_3R_VPADD_VQRDMLAH 23
4008 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4009 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
4010 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4011 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4012 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4013 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4014 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4015 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
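/* As for neon_2rm_sizes below: bit n of each entry is set if the insn
 * accepts size value n, so unallocated op values always UNDEF.
 */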
4017 static const uint8_t neon_3r_sizes[] = {
4018 [NEON_3R_VHADD] = 0x7,
4019 [NEON_3R_VQADD] = 0xf,
4020 [NEON_3R_VRHADD] = 0x7,
4021 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4022 [NEON_3R_VHSUB] = 0x7,
4023 [NEON_3R_VQSUB] = 0xf,
4024 [NEON_3R_VCGT] = 0x7,
4025 [NEON_3R_VCGE] = 0x7,
4026 [NEON_3R_VSHL] = 0xf,
4027 [NEON_3R_VQSHL] = 0xf,
4028 [NEON_3R_VRSHL] = 0xf,
4029 [NEON_3R_VQRSHL] = 0xf,
4030 [NEON_3R_VMAX] = 0x7,
4031 [NEON_3R_VMIN] = 0x7,
4032 [NEON_3R_VABD] = 0x7,
4033 [NEON_3R_VABA] = 0x7,
4034 [NEON_3R_VADD_VSUB] = 0xf,
4035 [NEON_3R_VTST_VCEQ] = 0x7,
4036 [NEON_3R_VML] = 0x7,
4037 [NEON_3R_VMUL] = 0x7,
4038 [NEON_3R_VPMAX] = 0x7,
4039 [NEON_3R_VPMIN] = 0x7,
4040 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4041 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
4042 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
4043 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
4044 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4045 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4046 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4047 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4048 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4049 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
4052 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4053 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4056 #define NEON_2RM_VREV64 0
4057 #define NEON_2RM_VREV32 1
4058 #define NEON_2RM_VREV16 2
4059 #define NEON_2RM_VPADDL 4
4060 #define NEON_2RM_VPADDL_U 5
4061 #define NEON_2RM_AESE 6 /* Includes AESD */
4062 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4063 #define NEON_2RM_VCLS 8
4064 #define NEON_2RM_VCLZ 9
4065 #define NEON_2RM_VCNT 10
4066 #define NEON_2RM_VMVN 11
4067 #define NEON_2RM_VPADAL 12
4068 #define NEON_2RM_VPADAL_U 13
4069 #define NEON_2RM_VQABS 14
4070 #define NEON_2RM_VQNEG 15
4071 #define NEON_2RM_VCGT0 16
4072 #define NEON_2RM_VCGE0 17
4073 #define NEON_2RM_VCEQ0 18
4074 #define NEON_2RM_VCLE0 19
4075 #define NEON_2RM_VCLT0 20
4076 #define NEON_2RM_SHA1H 21
4077 #define NEON_2RM_VABS 22
4078 #define NEON_2RM_VNEG 23
4079 #define NEON_2RM_VCGT0_F 24
4080 #define NEON_2RM_VCGE0_F 25
4081 #define NEON_2RM_VCEQ0_F 26
4082 #define NEON_2RM_VCLE0_F 27
4083 #define NEON_2RM_VCLT0_F 28
4084 #define NEON_2RM_VABS_F 30
4085 #define NEON_2RM_VNEG_F 31
4086 #define NEON_2RM_VSWP 32
4087 #define NEON_2RM_VTRN 33
4088 #define NEON_2RM_VUZP 34
4089 #define NEON_2RM_VZIP 35
4090 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4091 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4092 #define NEON_2RM_VSHLL 38
4093 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
4094 #define NEON_2RM_VRINTN 40
4095 #define NEON_2RM_VRINTX 41
4096 #define NEON_2RM_VRINTA 42
4097 #define NEON_2RM_VRINTZ 43
4098 #define NEON_2RM_VCVT_F16_F32 44
4099 #define NEON_2RM_VRINTM 45
4100 #define NEON_2RM_VCVT_F32_F16 46
4101 #define NEON_2RM_VRINTP 47
4102 #define NEON_2RM_VCVTAU 48
4103 #define NEON_2RM_VCVTAS 49
4104 #define NEON_2RM_VCVTNU 50
4105 #define NEON_2RM_VCVTNS 51
4106 #define NEON_2RM_VCVTPU 52
4107 #define NEON_2RM_VCVTPS 53
4108 #define NEON_2RM_VCVTMU 54
4109 #define NEON_2RM_VCVTMS 55
4110 #define NEON_2RM_VRECPE 56
4111 #define NEON_2RM_VRSQRTE 57
4112 #define NEON_2RM_VRECPE_F 58
4113 #define NEON_2RM_VRSQRTE_F 59
4114 #define NEON_2RM_VCVT_FS 60
4115 #define NEON_2RM_VCVT_FU 61
4116 #define NEON_2RM_VCVT_SF 62
4117 #define NEON_2RM_VCVT_UF 63
4119 static bool neon_2rm_is_v8_op(int op)
4121 /* Return true if this neon 2reg-misc op is ARMv8 and up */
4123 case NEON_2RM_VRINTN:
4124 case NEON_2RM_VRINTA:
4125 case NEON_2RM_VRINTM:
4126 case NEON_2RM_VRINTP:
4127 case NEON_2RM_VRINTZ:
4128 case NEON_2RM_VRINTX:
4129 case NEON_2RM_VCVTAU:
4130 case NEON_2RM_VCVTAS:
4131 case NEON_2RM_VCVTNU:
4132 case NEON_2RM_VCVTNS:
4133 case NEON_2RM_VCVTPU:
4134 case NEON_2RM_VCVTPS:
4135 case NEON_2RM_VCVTMU:
4136 case NEON_2RM_VCVTMS:
4143 /* Each entry in this array has bit n set if the insn allows
4144 * size value n (otherwise it will UNDEF). Since unallocated
4145 * op values will have no bits set they always UNDEF.
4147 static const uint8_t neon_2rm_sizes[] = {
4148 [NEON_2RM_VREV64] = 0x7,
4149 [NEON_2RM_VREV32] = 0x3,
4150 [NEON_2RM_VREV16] = 0x1,
4151 [NEON_2RM_VPADDL] = 0x7,
4152 [NEON_2RM_VPADDL_U] = 0x7,
4153 [NEON_2RM_AESE] = 0x1,
4154 [NEON_2RM_AESMC] = 0x1,
4155 [NEON_2RM_VCLS] = 0x7,
4156 [NEON_2RM_VCLZ] = 0x7,
4157 [NEON_2RM_VCNT] = 0x1,
4158 [NEON_2RM_VMVN] = 0x1,
4159 [NEON_2RM_VPADAL] = 0x7,
4160 [NEON_2RM_VPADAL_U] = 0x7,
4161 [NEON_2RM_VQABS] = 0x7,
4162 [NEON_2RM_VQNEG] = 0x7,
4163 [NEON_2RM_VCGT0] = 0x7,
4164 [NEON_2RM_VCGE0] = 0x7,
4165 [NEON_2RM_VCEQ0] = 0x7,
4166 [NEON_2RM_VCLE0] = 0x7,
4167 [NEON_2RM_VCLT0] = 0x7,
4168 [NEON_2RM_SHA1H] = 0x4,
4169 [NEON_2RM_VABS] = 0x7,
4170 [NEON_2RM_VNEG] = 0x7,
4171 [NEON_2RM_VCGT0_F] = 0x4,
4172 [NEON_2RM_VCGE0_F] = 0x4,
4173 [NEON_2RM_VCEQ0_F] = 0x4,
4174 [NEON_2RM_VCLE0_F] = 0x4,
4175 [NEON_2RM_VCLT0_F] = 0x4,
4176 [NEON_2RM_VABS_F] = 0x4,
4177 [NEON_2RM_VNEG_F] = 0x4,
4178 [NEON_2RM_VSWP] = 0x1,
4179 [NEON_2RM_VTRN] = 0x7,
4180 [NEON_2RM_VUZP] = 0x7,
4181 [NEON_2RM_VZIP] = 0x7,
4182 [NEON_2RM_VMOVN] = 0x7,
4183 [NEON_2RM_VQMOVN] = 0x7,
4184 [NEON_2RM_VSHLL] = 0x7,
4185 [NEON_2RM_SHA1SU1] = 0x4,
4186 [NEON_2RM_VRINTN] = 0x4,
4187 [NEON_2RM_VRINTX] = 0x4,
4188 [NEON_2RM_VRINTA] = 0x4,
4189 [NEON_2RM_VRINTZ] = 0x4,
4190 [NEON_2RM_VCVT_F16_F32] = 0x2,
4191 [NEON_2RM_VRINTM] = 0x4,
4192 [NEON_2RM_VCVT_F32_F16] = 0x2,
4193 [NEON_2RM_VRINTP] = 0x4,
4194 [NEON_2RM_VCVTAU] = 0x4,
4195 [NEON_2RM_VCVTAS] = 0x4,
4196 [NEON_2RM_VCVTNU] = 0x4,
4197 [NEON_2RM_VCVTNS] = 0x4,
4198 [NEON_2RM_VCVTPU] = 0x4,
4199 [NEON_2RM_VCVTPS] = 0x4,
4200 [NEON_2RM_VCVTMU] = 0x4,
4201 [NEON_2RM_VCVTMS] = 0x4,
4202 [NEON_2RM_VRECPE] = 0x4,
4203 [NEON_2RM_VRSQRTE] = 0x4,
4204 [NEON_2RM_VRECPE_F] = 0x4,
4205 [NEON_2RM_VRSQRTE_F] = 0x4,
4206 [NEON_2RM_VCVT_FS] = 0x4,
4207 [NEON_2RM_VCVT_FU] = 0x4,
4208 [NEON_2RM_VCVT_SF] = 0x4,
4209 [NEON_2RM_VCVT_UF] = 0x4,
4213 /* Expand v8.1 simd helper. */
4214 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
4215 int q, int rd, int rn, int rm)
4217 if (dc_isar_feature(aa32_rdm, s)) {
4218 int opr_sz = (1 + q) * 8;
4219 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
4220 vfp_reg_offset(1, rn),
4221 vfp_reg_offset(1, rm), cpu_env,
4222 opr_sz, opr_sz, 0, fn);
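/* SSRA (signed shift right and accumulate) expanders, one per element
 * size, plus a generic vector form.
 */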
4228 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4230 tcg_gen_vec_sar8i_i64(a, a, shift);
4231 tcg_gen_vec_add8_i64(d, d, a);
4234 static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4236 tcg_gen_vec_sar16i_i64(a, a, shift);
4237 tcg_gen_vec_add16_i64(d, d, a);
4240 static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4242 tcg_gen_sari_i32(a, a, shift);
4243 tcg_gen_add_i32(d, d, a);
4246 static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4248 tcg_gen_sari_i64(a, a, shift);
4249 tcg_gen_add_i64(d, d, a);
4252 static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4254 tcg_gen_sari_vec(vece, a, a, sh);
4255 tcg_gen_add_vec(vece, d, d, a);
4258 static const TCGOpcode vecop_list_ssra[] = {
4259 INDEX_op_sari_vec, INDEX_op_add_vec, 0
4262 const GVecGen2i ssra_op[4] = {
4263 { .fni8 = gen_ssra8_i64,
4264 .fniv = gen_ssra_vec,
4266 .opt_opc = vecop_list_ssra,
4268 { .fni8 = gen_ssra16_i64,
4269 .fniv = gen_ssra_vec,
4271 .opt_opc = vecop_list_ssra,
4273 { .fni4 = gen_ssra32_i32,
4274 .fniv = gen_ssra_vec,
4276 .opt_opc = vecop_list_ssra,
4278 { .fni8 = gen_ssra64_i64,
4279 .fniv = gen_ssra_vec,
4280 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4281 .opt_opc = vecop_list_ssra,
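/* USRA (unsigned shift right and accumulate) expanders. */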
4286 static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4288 tcg_gen_vec_shr8i_i64(a, a, shift);
4289 tcg_gen_vec_add8_i64(d, d, a);
4292 static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4294 tcg_gen_vec_shr16i_i64(a, a, shift);
4295 tcg_gen_vec_add16_i64(d, d, a);
4298 static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4300 tcg_gen_shri_i32(a, a, shift);
4301 tcg_gen_add_i32(d, d, a);
4304 static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4306 tcg_gen_shri_i64(a, a, shift);
4307 tcg_gen_add_i64(d, d, a);
4310 static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4312 tcg_gen_shri_vec(vece, a, a, sh);
4313 tcg_gen_add_vec(vece, d, d, a);
4316 static const TCGOpcode vecop_list_usra[] = {
4317 INDEX_op_shri_vec, INDEX_op_add_vec, 0
4320 const GVecGen2i usra_op[4] = {
4321 { .fni8 = gen_usra8_i64,
4322 .fniv = gen_usra_vec,
4324 .opt_opc = vecop_list_usra,
4326 { .fni8 = gen_usra16_i64,
4327 .fniv = gen_usra_vec,
4329 .opt_opc = vecop_list_usra,
4331 { .fni4 = gen_usra32_i32,
4332 .fniv = gen_usra_vec,
4334 .opt_opc = vecop_list_usra,
4336 { .fni8 = gen_usra64_i64,
4337 .fniv = gen_usra_vec,
4338 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4340 .opt_opc = vecop_list_usra,
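/* SRI (shift right and insert) expanders: the shifted source replaces the
 * low bits of each destination element while the top 'shift' bits of the
 * destination are preserved.
 */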
4344 static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4346 uint64_t mask = dup_const(MO_8, 0xff >> shift);
4347 TCGv_i64 t = tcg_temp_new_i64();
4349 tcg_gen_shri_i64(t, a, shift);
4350 tcg_gen_andi_i64(t, t, mask);
4351 tcg_gen_andi_i64(d, d, ~mask);
4352 tcg_gen_or_i64(d, d, t);
4353 tcg_temp_free_i64(t);
4356 static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4358 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
4359 TCGv_i64 t = tcg_temp_new_i64();
4361 tcg_gen_shri_i64(t, a, shift);
4362 tcg_gen_andi_i64(t, t, mask);
4363 tcg_gen_andi_i64(d, d, ~mask);
4364 tcg_gen_or_i64(d, d, t);
4365 tcg_temp_free_i64(t);
4368 static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4370 tcg_gen_shri_i32(a, a, shift);
4371 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
4374 static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4376 tcg_gen_shri_i64(a, a, shift);
4377 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
4380 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4383 tcg_gen_mov_vec(d, a);
4385 TCGv_vec t = tcg_temp_new_vec_matching(d);
4386 TCGv_vec m = tcg_temp_new_vec_matching(d);
4388 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
4389 tcg_gen_shri_vec(vece, t, a, sh);
4390 tcg_gen_and_vec(vece, d, d, m);
4391 tcg_gen_or_vec(vece, d, d, t);
4393 tcg_temp_free_vec(t);
4394 tcg_temp_free_vec(m);
4398 static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };
4400 const GVecGen2i sri_op[4] = {
4401 { .fni8 = gen_shr8_ins_i64,
4402 .fniv = gen_shr_ins_vec,
4404 .opt_opc = vecop_list_sri,
4406 { .fni8 = gen_shr16_ins_i64,
4407 .fniv = gen_shr_ins_vec,
4409 .opt_opc = vecop_list_sri,
4411 { .fni4 = gen_shr32_ins_i32,
4412 .fniv = gen_shr_ins_vec,
4414 .opt_opc = vecop_list_sri,
4416 { .fni8 = gen_shr64_ins_i64,
4417 .fniv = gen_shr_ins_vec,
4418 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4420 .opt_opc = vecop_list_sri,
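/* SLI (shift left and insert) expanders: here the low 'shift' bits of
 * each destination element are preserved instead.
 */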
4424 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4426 uint64_t mask = dup_const(MO_8, 0xff << shift);
4427 TCGv_i64 t = tcg_temp_new_i64();
4429 tcg_gen_shli_i64(t, a, shift);
4430 tcg_gen_andi_i64(t, t, mask);
4431 tcg_gen_andi_i64(d, d, ~mask);
4432 tcg_gen_or_i64(d, d, t);
4433 tcg_temp_free_i64(t);
4436 static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4438 uint64_t mask = dup_const(MO_16, 0xffff << shift);
4439 TCGv_i64 t = tcg_temp_new_i64();
4441 tcg_gen_shli_i64(t, a, shift);
4442 tcg_gen_andi_i64(t, t, mask);
4443 tcg_gen_andi_i64(d, d, ~mask);
4444 tcg_gen_or_i64(d, d, t);
4445 tcg_temp_free_i64(t);
4448 static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4450 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
4453 static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4455 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
4458 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4461 tcg_gen_mov_vec(d, a);
4463 TCGv_vec t = tcg_temp_new_vec_matching(d);
4464 TCGv_vec m = tcg_temp_new_vec_matching(d);
4466 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
4467 tcg_gen_shli_vec(vece, t, a, sh);
4468 tcg_gen_and_vec(vece, d, d, m);
4469 tcg_gen_or_vec(vece, d, d, t);
4471 tcg_temp_free_vec(t);
4472 tcg_temp_free_vec(m);
4476 static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };
4478 const GVecGen2i sli_op[4] = {
4479 { .fni8 = gen_shl8_ins_i64,
4480 .fniv = gen_shl_ins_vec,
4482 .opt_opc = vecop_list_sli,
4484 { .fni8 = gen_shl16_ins_i64,
4485 .fniv = gen_shl_ins_vec,
4487 .opt_opc = vecop_list_sli,
4489 { .fni4 = gen_shl32_ins_i32,
4490 .fniv = gen_shl_ins_vec,
4492 .opt_opc = vecop_list_sli,
4494 { .fni8 = gen_shl64_ins_i64,
4495 .fniv = gen_shl_ins_vec,
4496 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4498 .opt_opc = vecop_list_sli,
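/* Expanders for integer multiply-accumulate (VMLA) and
 * multiply-subtract (VMLS).
 */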
4502 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4504 gen_helper_neon_mul_u8(a, a, b);
4505 gen_helper_neon_add_u8(d, d, a);
4508 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4510 gen_helper_neon_mul_u8(a, a, b);
4511 gen_helper_neon_sub_u8(d, d, a);
4514 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4516 gen_helper_neon_mul_u16(a, a, b);
4517 gen_helper_neon_add_u16(d, d, a);
4520 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4522 gen_helper_neon_mul_u16(a, a, b);
4523 gen_helper_neon_sub_u16(d, d, a);
4526 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4528 tcg_gen_mul_i32(a, a, b);
4529 tcg_gen_add_i32(d, d, a);
4532 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4534 tcg_gen_mul_i32(a, a, b);
4535 tcg_gen_sub_i32(d, d, a);
4538 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4540 tcg_gen_mul_i64(a, a, b);
4541 tcg_gen_add_i64(d, d, a);
4544 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4546 tcg_gen_mul_i64(a, a, b);
4547 tcg_gen_sub_i64(d, d, a);
4550 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4552 tcg_gen_mul_vec(vece, a, a, b);
4553 tcg_gen_add_vec(vece, d, d, a);
4556 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4558 tcg_gen_mul_vec(vece, a, a, b);
4559 tcg_gen_sub_vec(vece, d, d, a);
4562 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
4563 * these tables are shared with AArch64 which does support them.
4566 static const TCGOpcode vecop_list_mla[] = {
4567 INDEX_op_mul_vec, INDEX_op_add_vec, 0
4570 static const TCGOpcode vecop_list_mls[] = {
4571 INDEX_op_mul_vec, INDEX_op_sub_vec, 0
4574 const GVecGen3 mla_op[4] = {
4575 { .fni4 = gen_mla8_i32,
4576 .fniv = gen_mla_vec,
4578 .opt_opc = vecop_list_mla,
4580 { .fni4 = gen_mla16_i32,
4581 .fniv = gen_mla_vec,
4583 .opt_opc = vecop_list_mla,
4585 { .fni4 = gen_mla32_i32,
4586 .fniv = gen_mla_vec,
4588 .opt_opc = vecop_list_mla,
4590 { .fni8 = gen_mla64_i64,
4591 .fniv = gen_mla_vec,
4592 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4594 .opt_opc = vecop_list_mla,
4598 const GVecGen3 mls_op[4] = {
4599 { .fni4 = gen_mls8_i32,
4600 .fniv = gen_mls_vec,
4602 .opt_opc = vecop_list_mls,
4604 { .fni4 = gen_mls16_i32,
4605 .fniv = gen_mls_vec,
4607 .opt_opc = vecop_list_mls,
4609 { .fni4 = gen_mls32_i32,
4610 .fniv = gen_mls_vec,
4612 .opt_opc = vecop_list_mls,
4614 { .fni8 = gen_mls64_i64,
4615 .fniv = gen_mls_vec,
4616 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4618 .opt_opc = vecop_list_mls,
4622 /* CMTST: test is "if ((X & Y) != 0)". */
4623 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4625 tcg_gen_and_i32(d, a, b);
4626 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
4627 tcg_gen_neg_i32(d, d);
4630 void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4632 tcg_gen_and_i64(d, a, b);
4633 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
4634 tcg_gen_neg_i64(d, d);
4637 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4639 tcg_gen_and_vec(vece, d, a, b);
4640 tcg_gen_dupi_vec(vece, a, 0);
4641 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
4644 static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };
4646 const GVecGen3 cmtst_op[4] = {
4647 { .fni4 = gen_helper_neon_tst_u8,
4648 .fniv = gen_cmtst_vec,
4649 .opt_opc = vecop_list_cmtst,
4651 { .fni4 = gen_helper_neon_tst_u16,
4652 .fniv = gen_cmtst_vec,
4653 .opt_opc = vecop_list_cmtst,
4655 { .fni4 = gen_cmtst_i32,
4656 .fniv = gen_cmtst_vec,
4657 .opt_opc = vecop_list_cmtst,
4659 { .fni8 = gen_cmtst_i64,
4660 .fniv = gen_cmtst_vec,
4661 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4662 .opt_opc = vecop_list_cmtst,
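/* Saturating add/sub expanders: compute both the wrapping and the
 * saturating result and fold any difference into the sat (QC) flag.
 */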
4666 static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4667 TCGv_vec a, TCGv_vec b)
4669 TCGv_vec x = tcg_temp_new_vec_matching(t);
4670 tcg_gen_add_vec(vece, x, a, b);
4671 tcg_gen_usadd_vec(vece, t, a, b);
4672 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4673 tcg_gen_or_vec(vece, sat, sat, x);
4674 tcg_temp_free_vec(x);
4677 static const TCGOpcode vecop_list_uqadd[] = {
4678 INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4681 const GVecGen4 uqadd_op[4] = {
4682 { .fniv = gen_uqadd_vec,
4683 .fno = gen_helper_gvec_uqadd_b,
4685 .opt_opc = vecop_list_uqadd,
4687 { .fniv = gen_uqadd_vec,
4688 .fno = gen_helper_gvec_uqadd_h,
4690 .opt_opc = vecop_list_uqadd,
4692 { .fniv = gen_uqadd_vec,
4693 .fno = gen_helper_gvec_uqadd_s,
4695 .opt_opc = vecop_list_uqadd,
4697 { .fniv = gen_uqadd_vec,
4698 .fno = gen_helper_gvec_uqadd_d,
4700 .opt_opc = vecop_list_uqadd,
4704 static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4705 TCGv_vec a, TCGv_vec b)
4707 TCGv_vec x = tcg_temp_new_vec_matching(t);
4708 tcg_gen_add_vec(vece, x, a, b);
4709 tcg_gen_ssadd_vec(vece, t, a, b);
4710 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4711 tcg_gen_or_vec(vece, sat, sat, x);
4712 tcg_temp_free_vec(x);
4715 static const TCGOpcode vecop_list_sqadd[] = {
4716 INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4719 const GVecGen4 sqadd_op[4] = {
4720 { .fniv = gen_sqadd_vec,
4721 .fno = gen_helper_gvec_sqadd_b,
4722 .opt_opc = vecop_list_sqadd,
4725 { .fniv = gen_sqadd_vec,
4726 .fno = gen_helper_gvec_sqadd_h,
4727 .opt_opc = vecop_list_sqadd,
4730 { .fniv = gen_sqadd_vec,
4731 .fno = gen_helper_gvec_sqadd_s,
4732 .opt_opc = vecop_list_sqadd,
4735 { .fniv = gen_sqadd_vec,
4736 .fno = gen_helper_gvec_sqadd_d,
4737 .opt_opc = vecop_list_sqadd,
4742 static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4743 TCGv_vec a, TCGv_vec b)
4745 TCGv_vec x = tcg_temp_new_vec_matching(t);
4746 tcg_gen_sub_vec(vece, x, a, b);
4747 tcg_gen_ussub_vec(vece, t, a, b);
4748 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4749 tcg_gen_or_vec(vece, sat, sat, x);
4750 tcg_temp_free_vec(x);
4753 static const TCGOpcode vecop_list_uqsub[] = {
4754 INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4757 const GVecGen4 uqsub_op[4] = {
4758 { .fniv = gen_uqsub_vec,
4759 .fno = gen_helper_gvec_uqsub_b,
4760 .opt_opc = vecop_list_uqsub,
4763 { .fniv = gen_uqsub_vec,
4764 .fno = gen_helper_gvec_uqsub_h,
4765 .opt_opc = vecop_list_uqsub,
4768 { .fniv = gen_uqsub_vec,
4769 .fno = gen_helper_gvec_uqsub_s,
4770 .opt_opc = vecop_list_uqsub,
4773 { .fniv = gen_uqsub_vec,
4774 .fno = gen_helper_gvec_uqsub_d,
4775 .opt_opc = vecop_list_uqsub,
4780 static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4781 TCGv_vec a, TCGv_vec b)
4783 TCGv_vec x = tcg_temp_new_vec_matching(t);
4784 tcg_gen_sub_vec(vece, x, a, b);
4785 tcg_gen_sssub_vec(vece, t, a, b);
4786 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4787 tcg_gen_or_vec(vece, sat, sat, x);
4788 tcg_temp_free_vec(x);
4791 static const TCGOpcode vecop_list_sqsub[] = {
4792 INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4795 const GVecGen4 sqsub_op[4] = {
4796 { .fniv = gen_sqsub_vec,
4797 .fno = gen_helper_gvec_sqsub_b,
4798 .opt_opc = vecop_list_sqsub,
4801 { .fniv = gen_sqsub_vec,
4802 .fno = gen_helper_gvec_sqsub_h,
4803 .opt_opc = vecop_list_sqsub,
4806 { .fniv = gen_sqsub_vec,
4807 .fno = gen_helper_gvec_sqsub_s,
4808 .opt_opc = vecop_list_sqsub,
4811 { .fniv = gen_sqsub_vec,
4812 .fno = gen_helper_gvec_sqsub_d,
4813 .opt_opc = vecop_list_sqsub,
4818 /* Translate a NEON data processing instruction. Return nonzero if the
4819 instruction is invalid.
4820 We process data in a mixture of 32-bit and 64-bit chunks.
4821 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4823 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
4827 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
4836 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
4837 TCGv_ptr ptr1, ptr2, ptr3;
4840 /* FIXME: this access check should not take precedence over UNDEF
4841 * for invalid encodings; we will generate incorrect syndrome information
4842 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4844 if (s->fp_excp_el) {
4845 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4846 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
4850 if (!s->vfp_enabled)
4852 q = (insn & (1 << 6)) != 0;
4853 u = (insn >> 24) & 1;
4854 VFP_DREG_D(rd, insn);
4855 VFP_DREG_N(rn, insn);
4856 VFP_DREG_M(rm, insn);
4857 size = (insn >> 20) & 3;
4858 vec_size = q ? 16 : 8;
4859 rd_ofs = neon_reg_offset(rd, 0);
4860 rn_ofs = neon_reg_offset(rn, 0);
4861 rm_ofs = neon_reg_offset(rm, 0);
4863 if ((insn & (1 << 23)) == 0) {
4864 /* Three register same length. */
4865 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4866 /* Catch invalid op and bad size combinations: UNDEF */
4867 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4870 /* All insns of this form UNDEF for either this condition or the
4871 * superset of cases "Q==1"; we catch the latter later.
4873 if (q && ((rd | rn | rm) & 1)) {
/* The SHA-1/SHA-256 3-register instructions require special
 * treatment here, as their size field is overloaded as an
 * op type selector, and they all consume their input in a
 * single pass.
 */
4886 if (!u) { /* SHA-1 */
4887 if (!dc_isar_feature(aa32_sha1, s)) {
4890 ptr1 = vfp_reg_ptr(true, rd);
4891 ptr2 = vfp_reg_ptr(true, rn);
4892 ptr3 = vfp_reg_ptr(true, rm);
4893 tmp4 = tcg_const_i32(size);
4894 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
4895 tcg_temp_free_i32(tmp4);
4896 } else { /* SHA-256 */
4897 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
4900 ptr1 = vfp_reg_ptr(true, rd);
4901 ptr2 = vfp_reg_ptr(true, rn);
4902 ptr3 = vfp_reg_ptr(true, rm);
4905 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
4908 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
4911 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
4915 tcg_temp_free_ptr(ptr1);
4916 tcg_temp_free_ptr(ptr2);
4917 tcg_temp_free_ptr(ptr3);
4920 case NEON_3R_VPADD_VQRDMLAH:
4927 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
4930 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
4935 case NEON_3R_VFM_VQRDMLSH:
4946 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
4949 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
4954 case NEON_3R_LOGIC: /* Logic ops. */
4955 switch ((u << 2) | size) {
4957 tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
4958 vec_size, vec_size);
4961 tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
4962 vec_size, vec_size);
4965 tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
4966 vec_size, vec_size);
4969 tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
4970 vec_size, vec_size);
4973 tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
4974 vec_size, vec_size);
4977 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
4978 vec_size, vec_size);
4981 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
4982 vec_size, vec_size);
4985 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
4986 vec_size, vec_size);
4991 case NEON_3R_VADD_VSUB:
4993 tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
4994 vec_size, vec_size);
4996 tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
4997 vec_size, vec_size);
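/* VQADD: saturating add (signedness selected by u); the uqadd_op /
 * sqadd_op expanders also update the QC flags at vfp.qc.
 */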
5002 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
5003 rn_ofs, rm_ofs, vec_size, vec_size,
5004 (u ? uqadd_op : sqadd_op) + size);
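/* VQSUB: saturating subtract, likewise updating QC. */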
5008 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
5009 rn_ofs, rm_ofs, vec_size, vec_size,
5010 (u ? uqsub_op : sqsub_op) + size);
5013 case NEON_3R_VMUL: /* VMUL */
5015 /* Polynomial case allows only P8 and is handled below. */
5020 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
5021 vec_size, vec_size);
5026 case NEON_3R_VML: /* VMLA, VMLS */
5027 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
5028 u ? &mls_op[size] : &mla_op[size]);
5031 case NEON_3R_VTST_VCEQ:
5033 tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
5034 vec_size, vec_size);
5036 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
5037 vec_size, vec_size, &cmtst_op[size]);
5042 tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
5043 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5047 tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
5048 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5053 tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
5054 vec_size, vec_size);
5056 tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
5057 vec_size, vec_size);
5062 tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
5063 vec_size, vec_size);
5065 tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
5066 vec_size, vec_size);
5072 /* 64-bit element instructions. */
5073 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5074 neon_load_reg64(cpu_V0, rn + pass);
5075 neon_load_reg64(cpu_V1, rm + pass);
5079 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5081 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5086 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5089 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5095 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5097 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5100 case NEON_3R_VQRSHL:
5102 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5105 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5112 neon_store_reg64(cpu_V0, rd + pass);
5121 case NEON_3R_VQRSHL:
5124 /* Shift instruction operands are reversed. */
5130 case NEON_3R_VPADD_VQRDMLAH:
5135 case NEON_3R_FLOAT_ARITH:
5136 pairwise = (u && size < 2); /* if VPADD (float) */
5138 case NEON_3R_FLOAT_MINMAX:
5139 pairwise = u; /* if VPMIN/VPMAX (float) */
5141 case NEON_3R_FLOAT_CMP:
5143 /* no encoding for U=0 C=1x */
5147 case NEON_3R_FLOAT_ACMP:
5152 case NEON_3R_FLOAT_MISC:
5153 /* VMAXNM/VMINNM in ARMv8 */
5154 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5158 case NEON_3R_VFM_VQRDMLSH:
5159 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
5167 if (pairwise && q) {
5168 /* All the pairwise insns UNDEF if Q is set */
5172 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5177 tmp = neon_load_reg(rn, 0);
5178 tmp2 = neon_load_reg(rn, 1);
5180 tmp = neon_load_reg(rm, 0);
5181 tmp2 = neon_load_reg(rm, 1);
5185 tmp = neon_load_reg(rn, pass);
5186 tmp2 = neon_load_reg(rm, pass);
5190 GEN_NEON_INTEGER_OP(hadd);
5192 case NEON_3R_VRHADD:
5193 GEN_NEON_INTEGER_OP(rhadd);
5196 GEN_NEON_INTEGER_OP(hsub);
5199 GEN_NEON_INTEGER_OP(shl);
5202 GEN_NEON_INTEGER_OP_ENV(qshl);
5205 GEN_NEON_INTEGER_OP(rshl);
5207 case NEON_3R_VQRSHL:
5208 GEN_NEON_INTEGER_OP_ENV(qrshl);
5211 GEN_NEON_INTEGER_OP(abd);
5214 GEN_NEON_INTEGER_OP(abd);
5215 tcg_temp_free_i32(tmp2);
5216 tmp2 = neon_load_reg(rd, pass);
5217 gen_neon_add(size, tmp, tmp2);
5220 /* VMUL.P8; other cases already eliminated. */
5221 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5224 GEN_NEON_INTEGER_OP(pmax);
5227 GEN_NEON_INTEGER_OP(pmin);
5229 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5230 if (!u) { /* VQDMULH */
5233 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5236 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5240 } else { /* VQRDMULH */
5243 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5246 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5252 case NEON_3R_VPADD_VQRDMLAH:
5254 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5255 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5256 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5260 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5262 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5263 switch ((u << 2) | size) {
5266 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5269 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5272 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5277 tcg_temp_free_ptr(fpstatus);
5280 case NEON_3R_FLOAT_MULTIPLY:
5282 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5283 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5285 tcg_temp_free_i32(tmp2);
5286 tmp2 = neon_load_reg(rd, pass);
5288 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5290 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5293 tcg_temp_free_ptr(fpstatus);
5296 case NEON_3R_FLOAT_CMP:
5298 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5300 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5303 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5305 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5308 tcg_temp_free_ptr(fpstatus);
5311 case NEON_3R_FLOAT_ACMP:
5313 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5315 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5317 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5319 tcg_temp_free_ptr(fpstatus);
5322 case NEON_3R_FLOAT_MINMAX:
5324 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5326 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5328 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5330 tcg_temp_free_ptr(fpstatus);
5333 case NEON_3R_FLOAT_MISC:
5336 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5338 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5340 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5342 tcg_temp_free_ptr(fpstatus);
5345 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5347 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5351 case NEON_3R_VFM_VQRDMLSH:
5353 /* VFMA, VFMS: fused multiply-add */
5354 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5355 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5358 gen_helper_vfp_negs(tmp, tmp);
5360 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5361 tcg_temp_free_i32(tmp3);
5362 tcg_temp_free_ptr(fpstatus);
5368 tcg_temp_free_i32(tmp2);
5370 /* Save the result. For elementwise operations we can put it
5371 straight into the destination register. For pairwise operations
5372 we have to be careful to avoid clobbering the source operands. */
5373 if (pairwise && rd == rm) {
5374 neon_store_scratch(pass, tmp);
5376 neon_store_reg(rd, pass, tmp);
5380 if (pairwise && rd == rm) {
5381 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5382 tmp = neon_load_scratch(pass);
5383 neon_store_reg(rd, pass, tmp);
5386 /* End of 3 register same size operations. */
5387 } else if (insn & (1 << 4)) {
5388 if ((insn & 0x00380080) != 0) {
5389 /* Two registers and shift. */
5390 op = (insn >> 8) & 0xf;
5391 if (insn & (1 << 7)) {
5399 while ((insn & (1 << (size + 19))) == 0)
5402 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5404 /* Shift by immediate:
5405 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5406 if (q && ((rd | rm) & 1)) {
5409 if (!u && (op == 4 || op == 6)) {
5412 /* Right shifts are encoded as N - shift, where N is the
5413 element size in bits. */
5415 shift = shift - (1 << (size + 3));
5420 /* Right shift comes here negative. */
/* Shifts larger than the element size are architecturally
 * valid. Unsigned results in all zeros; signed results
 * in all sign bits.
 */
5427 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5428 MIN(shift, (8 << size) - 1),
5429 vec_size, vec_size);
5430 } else if (shift >= 8 << size) {
5431 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5433 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5434 vec_size, vec_size);
5439 /* Right shift comes here negative. */
/* Shifts larger than the element size are architecturally
 * valid. Unsigned results in all zeros; signed results
 * in all sign bits.
 */
5446 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5447 MIN(shift, (8 << size) - 1),
5449 } else if (shift >= 8 << size) {
5452 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5453 shift, &usra_op[size]);
5461 /* Right shift comes here negative. */
5463 /* Shift out of range leaves destination unchanged. */
5464 if (shift < 8 << size) {
5465 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5466 shift, &sri_op[size]);
5470 case 5: /* VSHL, VSLI */
5472 /* Shift out of range leaves destination unchanged. */
5473 if (shift < 8 << size) {
5474 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5475 vec_size, shift, &sli_op[size]);
/* Shifts larger than the element size are
 * architecturally valid and result in zero.
 */
5481 if (shift >= 8 << size) {
5482 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5484 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5485 vec_size, vec_size);
5497 /* To avoid excessive duplication of ops we implement shift
5498 * by immediate using the variable shift operations.
5500 imm = dup_const(size, shift);
5502 for (pass = 0; pass < count; pass++) {
5504 neon_load_reg64(cpu_V0, rm + pass);
5505 tcg_gen_movi_i64(cpu_V1, imm);
5510 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5512 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5514 case 6: /* VQSHLU */
5515 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5520 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5523 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5528 g_assert_not_reached();
5532 neon_load_reg64(cpu_V1, rd + pass);
5533 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5535 neon_store_reg64(cpu_V0, rd + pass);
5536 } else { /* size < 3 */
5537 /* Operands in T0 and T1. */
5538 tmp = neon_load_reg(rm, pass);
5539 tmp2 = tcg_temp_new_i32();
5540 tcg_gen_movi_i32(tmp2, imm);
5544 GEN_NEON_INTEGER_OP(rshl);
5546 case 6: /* VQSHLU */
5549 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5553 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5557 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5565 GEN_NEON_INTEGER_OP_ENV(qshl);
5568 g_assert_not_reached();
5570 tcg_temp_free_i32(tmp2);
5574 tmp2 = neon_load_reg(rd, pass);
5575 gen_neon_add(size, tmp, tmp2);
5576 tcg_temp_free_i32(tmp2);
5578 neon_store_reg(rd, pass, tmp);
5581 } else if (op < 10) {
5582 /* Shift by immediate and narrow:
5583 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5584 int input_unsigned = (op == 8) ? !u : u;
5588 shift = shift - (1 << (size + 3));
5591 tmp64 = tcg_const_i64(shift);
5592 neon_load_reg64(cpu_V0, rm);
5593 neon_load_reg64(cpu_V1, rm + 1);
5594 for (pass = 0; pass < 2; pass++) {
5602 if (input_unsigned) {
5603 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5605 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5608 if (input_unsigned) {
5609 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5611 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5614 tmp = tcg_temp_new_i32();
5615 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5616 neon_store_reg(rd, pass, tmp);
5618 tcg_temp_free_i64(tmp64);
5621 imm = (uint16_t)shift;
5625 imm = (uint32_t)shift;
5627 tmp2 = tcg_const_i32(imm);
5628 tmp4 = neon_load_reg(rm + 1, 0);
5629 tmp5 = neon_load_reg(rm + 1, 1);
5630 for (pass = 0; pass < 2; pass++) {
5632 tmp = neon_load_reg(rm, 0);
5636 gen_neon_shift_narrow(size, tmp, tmp2, q,
5639 tmp3 = neon_load_reg(rm, 1);
5643 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5645 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5646 tcg_temp_free_i32(tmp);
5647 tcg_temp_free_i32(tmp3);
5648 tmp = tcg_temp_new_i32();
5649 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5650 neon_store_reg(rd, pass, tmp);
5652 tcg_temp_free_i32(tmp2);
5654 } else if (op == 10) {
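/* VSHLL, VMOVL: widen each element of the source register and
 * shift left by the immediate.
 */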
5656 if (q || (rd & 1)) {
5659 tmp = neon_load_reg(rm, 0);
5660 tmp2 = neon_load_reg(rm, 1);
5661 for (pass = 0; pass < 2; pass++) {
5665 gen_neon_widen(cpu_V0, tmp, size, u);
5668 /* The shift is less than the width of the source
5669 type, so we can just shift the whole register. */
5670 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
/* Widen the result of shift: we need to clear
 * the potential overflow bits resulting from
 * left bits of the narrow input appearing as
 * right bits of the left neighbour narrow
 * input. Calculate a mask of bits to clear.
 */
5676 if (size < 2 || !u) {
5679 imm = (0xffu >> (8 - shift));
5681 } else if (size == 1) {
5682 imm = 0xffff >> (16 - shift);
5685 imm = 0xffffffff >> (32 - shift);
5688 imm64 = imm | (((uint64_t)imm) << 32);
5692 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5695 neon_store_reg64(cpu_V0, rd + pass);
5697 } else if (op >= 14) {
5698 /* VCVT fixed-point. */
5701 VFPGenFixPointFn *fn;
5703 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5709 fn = gen_helper_vfp_ultos;
5711 fn = gen_helper_vfp_sltos;
5715 fn = gen_helper_vfp_touls_round_to_zero;
5717 fn = gen_helper_vfp_tosls_round_to_zero;
5721 /* We have already masked out the must-be-1 top bit of imm6,
5722 * hence this 32-shift where the ARM ARM has 64-imm6.
5725 fpst = get_fpstatus_ptr(1);
5726 shiftv = tcg_const_i32(shift);
5727 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5728 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5729 fn(tmpf, tmpf, shiftv, fpst);
5730 neon_store_reg(rd, pass, tmpf);
5732 tcg_temp_free_ptr(fpst);
5733 tcg_temp_free_i32(shiftv);
5737 } else { /* (insn & 0x00380080) == 0 */
5738 int invert, reg_ofs, vec_size;
5740 if (q && (rd & 1)) {
5744 op = (insn >> 8) & 0xf;
5745 /* One register and immediate. */
5746 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5747 invert = (insn & (1 << 5)) != 0;
5748 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5749 * We choose to not special-case this and will behave as if a
5750 * valid constant encoding of 0 had been given.
5769 imm = (imm << 8) | (imm << 24);
5772 imm = (imm << 8) | 0xff;
5775 imm = (imm << 16) | 0xffff;
5778 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5787 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5788 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5795 reg_ofs = neon_reg_offset(rd, 0);
5796 vec_size = q ? 16 : 8;
5798 if (op & 1 && op < 12) {
5800 /* The immediate value has already been inverted,
5801 * so BIC becomes AND.
5803 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5804 vec_size, vec_size);
5806 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5807 vec_size, vec_size);
5811 if (op == 14 && invert) {
5812 TCGv_i64 t64 = tcg_temp_new_i64();
5814 for (pass = 0; pass <= q; ++pass) {
5818 for (n = 0; n < 8; n++) {
5819 if (imm & (1 << (n + pass * 8))) {
5820 val |= 0xffull << (n * 8);
5823 tcg_gen_movi_i64(t64, val);
5824 neon_store_reg64(t64, rd + pass);
5826 tcg_temp_free_i64(t64);
5828 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
5832 } else { /* (insn & 0x00800010 == 0x00800000) */
5834 op = (insn >> 8) & 0xf;
5835 if ((insn & (1 << 6)) == 0) {
5836 /* Three registers of different lengths. */
5840 /* undefreq: bit 0 : UNDEF if size == 0
5841 * bit 1 : UNDEF if size == 1
5842 * bit 2 : UNDEF if size == 2
5843 * bit 3 : UNDEF if U == 1
5844 * Note that [2:0] set implies 'always UNDEF'
5847 /* prewiden, src1_wide, src2_wide, undefreq */
5848 static const int neon_3reg_wide[16][4] = {
5849 {1, 0, 0, 0}, /* VADDL */
5850 {1, 1, 0, 0}, /* VADDW */
5851 {1, 0, 0, 0}, /* VSUBL */
5852 {1, 1, 0, 0}, /* VSUBW */
5853 {0, 1, 1, 0}, /* VADDHN */
5854 {0, 0, 0, 0}, /* VABAL */
5855 {0, 1, 1, 0}, /* VSUBHN */
5856 {0, 0, 0, 0}, /* VABDL */
5857 {0, 0, 0, 0}, /* VMLAL */
5858 {0, 0, 0, 9}, /* VQDMLAL */
5859 {0, 0, 0, 0}, /* VMLSL */
5860 {0, 0, 0, 9}, /* VQDMLSL */
5861 {0, 0, 0, 0}, /* Integer VMULL */
5862 {0, 0, 0, 1}, /* VQDMULL */
5863 {0, 0, 0, 0xa}, /* Polynomial VMULL */
5864 {0, 0, 0, 7}, /* Reserved: always UNDEF */
5867 prewiden = neon_3reg_wide[op][0];
5868 src1_wide = neon_3reg_wide[op][1];
5869 src2_wide = neon_3reg_wide[op][2];
5870 undefreq = neon_3reg_wide[op][3];
5872 if ((undefreq & (1 << size)) ||
5873 ((undefreq & 8) && u)) {
5876 if ((src1_wide && (rn & 1)) ||
5877 (src2_wide && (rm & 1)) ||
5878 (!src2_wide && (rd & 1))) {
5882 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5883 * outside the loop below as it only performs a single pass.
5885 if (op == 14 && size == 2) {
5886 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
5888 if (!dc_isar_feature(aa32_pmull, s)) {
5891 tcg_rn = tcg_temp_new_i64();
5892 tcg_rm = tcg_temp_new_i64();
5893 tcg_rd = tcg_temp_new_i64();
5894 neon_load_reg64(tcg_rn, rn);
5895 neon_load_reg64(tcg_rm, rm);
5896 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
5897 neon_store_reg64(tcg_rd, rd);
5898 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
5899 neon_store_reg64(tcg_rd, rd + 1);
5900 tcg_temp_free_i64(tcg_rn);
5901 tcg_temp_free_i64(tcg_rm);
5902 tcg_temp_free_i64(tcg_rd);
5906 /* Avoid overlapping operands. Wide source operands are
5907 always aligned so will never overlap with wide
5908 destinations in problematic ways. */
5909 if (rd == rm && !src2_wide) {
5910 tmp = neon_load_reg(rm, 1);
5911 neon_store_scratch(2, tmp);
5912 } else if (rd == rn && !src1_wide) {
5913 tmp = neon_load_reg(rn, 1);
5914 neon_store_scratch(2, tmp);
5917 for (pass = 0; pass < 2; pass++) {
5919 neon_load_reg64(cpu_V0, rn + pass);
5922 if (pass == 1 && rd == rn) {
5923 tmp = neon_load_scratch(2);
5925 tmp = neon_load_reg(rn, pass);
5928 gen_neon_widen(cpu_V0, tmp, size, u);
5932 neon_load_reg64(cpu_V1, rm + pass);
5935 if (pass == 1 && rd == rm) {
5936 tmp2 = neon_load_scratch(2);
5938 tmp2 = neon_load_reg(rm, pass);
5941 gen_neon_widen(cpu_V1, tmp2, size, u);
5945 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5946 gen_neon_addl(size);
5948 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5949 gen_neon_subl(size);
5951 case 5: case 7: /* VABAL, VABDL */
5952 switch ((size << 1) | u) {
5954 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5957 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5960 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5963 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5966 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5969 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5973 tcg_temp_free_i32(tmp2);
5974 tcg_temp_free_i32(tmp);
5976 case 8: case 9: case 10: case 11: case 12: case 13:
5977 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5978 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5980 case 14: /* Polynomial VMULL */
5981 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5982 tcg_temp_free_i32(tmp2);
5983 tcg_temp_free_i32(tmp);
5985 default: /* 15 is RESERVED: caught earlier */
5990 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5991 neon_store_reg64(cpu_V0, rd + pass);
5992 } else if (op == 5 || (op >= 8 && op <= 11)) {
5994 neon_load_reg64(cpu_V1, rd + pass);
5996 case 10: /* VMLSL */
5997 gen_neon_negl(cpu_V0, size);
5999 case 5: case 8: /* VABAL, VMLAL */
6000 gen_neon_addl(size);
6002 case 9: case 11: /* VQDMLAL, VQDMLSL */
6003 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6005 gen_neon_negl(cpu_V0, size);
6007 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6012 neon_store_reg64(cpu_V0, rd + pass);
6013 } else if (op == 4 || op == 6) {
6014 /* Narrowing operation. */
6015 tmp = tcg_temp_new_i32();
6019 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6022 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6025 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6026 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6033 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6036 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6039 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6040 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6041 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6049 neon_store_reg(rd, 0, tmp3);
6050 neon_store_reg(rd, 1, tmp);
6053 /* Write back the result. */
6054 neon_store_reg64(cpu_V0, rd + pass);
/* Two registers and a scalar. NB that for ops of this form
 * the ARM ARM labels bit 24 as Q, but it is in our variable
 * 'u', not 'q'.
 */
case 1: /* Float VMLA scalar */
case 5: /* Float VMLS scalar */
case 9: /* Float VMUL scalar */
6073 case 0: /* Integer VMLA scalar */
6074 case 4: /* Integer VMLS scalar */
6075 case 8: /* Integer VMUL scalar */
6076 case 12: /* VQDMULH scalar */
6077 case 13: /* VQRDMULH scalar */
6078 if (u && ((rd | rn) & 1)) {
6081 tmp = neon_get_scalar(size, rm);
6082 neon_store_scratch(0, tmp);
6083 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6084 tmp = neon_load_scratch(0);
6085 tmp2 = neon_load_reg(rn, pass);
6088 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6090 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6092 } else if (op == 13) {
6094 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6096 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6098 } else if (op & 1) {
6099 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6100 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6101 tcg_temp_free_ptr(fpstatus);
6104 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6105 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6106 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6110 tcg_temp_free_i32(tmp2);
6113 tmp2 = neon_load_reg(rd, pass);
6116 gen_neon_add(size, tmp, tmp2);
6120 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6121 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6122 tcg_temp_free_ptr(fpstatus);
6126 gen_neon_rsb(size, tmp, tmp2);
6130 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6131 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6132 tcg_temp_free_ptr(fpstatus);
6138 tcg_temp_free_i32(tmp2);
6140 neon_store_reg(rd, pass, tmp);
6143 case 3: /* VQDMLAL scalar */
6144 case 7: /* VQDMLSL scalar */
6145 case 11: /* VQDMULL scalar */
case 2: /* VMLAL scalar */
6151 case 6: /* VMLSL scalar */
6152 case 10: /* VMULL scalar */
6156 tmp2 = neon_get_scalar(size, rm);
6157 /* We need a copy of tmp2 because gen_neon_mull
6158 * deletes it during pass 0. */
6159 tmp4 = tcg_temp_new_i32();
6160 tcg_gen_mov_i32(tmp4, tmp2);
6161 tmp3 = neon_load_reg(rn, 1);
6163 for (pass = 0; pass < 2; pass++) {
6165 tmp = neon_load_reg(rn, 0);
6170 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6172 neon_load_reg64(cpu_V1, rd + pass);
6176 gen_neon_negl(cpu_V0, size);
6179 gen_neon_addl(size);
6182 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6184 gen_neon_negl(cpu_V0, size);
6186 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6192 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6197 neon_store_reg64(cpu_V0, rd + pass);
6200 case 14: /* VQRDMLAH scalar */
6201 case 15: /* VQRDMLSH scalar */
6203 NeonGenThreeOpEnvFn *fn;
6205 if (!dc_isar_feature(aa32_rdm, s)) {
6208 if (u && ((rd | rn) & 1)) {
6213 fn = gen_helper_neon_qrdmlah_s16;
6215 fn = gen_helper_neon_qrdmlah_s32;
6219 fn = gen_helper_neon_qrdmlsh_s16;
6221 fn = gen_helper_neon_qrdmlsh_s32;
6225 tmp2 = neon_get_scalar(size, rm);
6226 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6227 tmp = neon_load_reg(rn, pass);
6228 tmp3 = neon_load_reg(rd, pass);
6229 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6230 tcg_temp_free_i32(tmp3);
6231 neon_store_reg(rd, pass, tmp);
6233 tcg_temp_free_i32(tmp2);
6237 g_assert_not_reached();
6240 } else { /* size == 3 */
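/* VEXT: extract a byte-aligned window from the concatenation of the
 * source registers, starting at byte index imm.
 */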
6243 imm = (insn >> 8) & 0xf;
6248 if (q && ((rd | rn | rm) & 1)) {
6253 neon_load_reg64(cpu_V0, rn);
6255 neon_load_reg64(cpu_V1, rn + 1);
6257 } else if (imm == 8) {
6258 neon_load_reg64(cpu_V0, rn + 1);
6260 neon_load_reg64(cpu_V1, rm);
6263 tmp64 = tcg_temp_new_i64();
6265 neon_load_reg64(cpu_V0, rn);
6266 neon_load_reg64(tmp64, rn + 1);
6268 neon_load_reg64(cpu_V0, rn + 1);
6269 neon_load_reg64(tmp64, rm);
6271 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6272 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6273 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6275 neon_load_reg64(cpu_V1, rm);
6277 neon_load_reg64(cpu_V1, rm + 1);
6280 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6281 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6282 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6283 tcg_temp_free_i64(tmp64);
6286 neon_load_reg64(cpu_V0, rn);
6287 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6288 neon_load_reg64(cpu_V1, rm);
6289 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6290 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6292 neon_store_reg64(cpu_V0, rd);
6294 neon_store_reg64(cpu_V1, rd + 1);
6296 } else if ((insn & (1 << 11)) == 0) {
6297 /* Two register misc. */
6298 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6299 size = (insn >> 18) & 3;
6300 /* UNDEF for unknown op values and bad op-size combinations */
6301 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6304 if (neon_2rm_is_v8_op(op) &&
6305 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6308 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6309 q && ((rm | rd) & 1)) {
6313 case NEON_2RM_VREV64:
6314 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6315 tmp = neon_load_reg(rm, pass * 2);
6316 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6318 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6319 case 1: gen_swap_half(tmp); break;
6320 case 2: /* no-op */ break;
6323 neon_store_reg(rd, pass * 2 + 1, tmp);
6325 neon_store_reg(rd, pass * 2, tmp2);
6328 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6329 case 1: gen_swap_half(tmp2); break;
6332 neon_store_reg(rd, pass * 2, tmp2);
6336 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6337 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6338 for (pass = 0; pass < q + 1; pass++) {
6339 tmp = neon_load_reg(rm, pass * 2);
6340 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6341 tmp = neon_load_reg(rm, pass * 2 + 1);
6342 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6344 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6345 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6346 case 2: tcg_gen_add_i64(CPU_V001); break;
6349 if (op >= NEON_2RM_VPADAL) {
6351 neon_load_reg64(cpu_V1, rd + pass);
6352 gen_neon_addl(size);
6354 neon_store_reg64(cpu_V0, rd + pass);
6360 for (n = 0; n < (q ? 4 : 2); n += 2) {
6361 tmp = neon_load_reg(rm, n);
6362 tmp2 = neon_load_reg(rd, n + 1);
6363 neon_store_reg(rm, n, tmp2);
6364 neon_store_reg(rd, n + 1, tmp);
6371 if (gen_neon_unzip(rd, rm, size, q)) {
6376 if (gen_neon_zip(rd, rm, size, q)) {
6380 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6381 /* also VQMOVUN; op field and mnemonics don't line up */
6386 for (pass = 0; pass < 2; pass++) {
6387 neon_load_reg64(cpu_V0, rm + pass);
6388 tmp = tcg_temp_new_i32();
6389 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6394 neon_store_reg(rd, 0, tmp2);
6395 neon_store_reg(rd, 1, tmp);
6399 case NEON_2RM_VSHLL:
6400 if (q || (rd & 1)) {
6403 tmp = neon_load_reg(rm, 0);
6404 tmp2 = neon_load_reg(rm, 1);
6405 for (pass = 0; pass < 2; pass++) {
6408 gen_neon_widen(cpu_V0, tmp, size, 1);
6409 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6410 neon_store_reg64(cpu_V0, rd + pass);
6413 case NEON_2RM_VCVT_F16_F32:
6418 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6422 fpst = get_fpstatus_ptr(true);
6423 ahp = get_ahp_flag();
6424 tmp = neon_load_reg(rm, 0);
6425 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6426 tmp2 = neon_load_reg(rm, 1);
6427 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
6428 tcg_gen_shli_i32(tmp2, tmp2, 16);
6429 tcg_gen_or_i32(tmp2, tmp2, tmp);
6430 tcg_temp_free_i32(tmp);
6431 tmp = neon_load_reg(rm, 2);
6432 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6433 tmp3 = neon_load_reg(rm, 3);
6434 neon_store_reg(rd, 0, tmp2);
6435 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6436 tcg_gen_shli_i32(tmp3, tmp3, 16);
6437 tcg_gen_or_i32(tmp3, tmp3, tmp);
6438 neon_store_reg(rd, 1, tmp3);
6439 tcg_temp_free_i32(tmp);
6440 tcg_temp_free_i32(ahp);
6441 tcg_temp_free_ptr(fpst);
6444 case NEON_2RM_VCVT_F32_F16:
6448 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6452 fpst = get_fpstatus_ptr(true);
6453 ahp = get_ahp_flag();
6454 tmp3 = tcg_temp_new_i32();
6455 tmp = neon_load_reg(rm, 0);
6456 tmp2 = neon_load_reg(rm, 1);
6457 tcg_gen_ext16u_i32(tmp3, tmp);
6458 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6459 neon_store_reg(rd, 0, tmp3);
6460 tcg_gen_shri_i32(tmp, tmp, 16);
6461 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6462 neon_store_reg(rd, 1, tmp);
6463 tmp3 = tcg_temp_new_i32();
6464 tcg_gen_ext16u_i32(tmp3, tmp2);
6465 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6466 neon_store_reg(rd, 2, tmp3);
6467 tcg_gen_shri_i32(tmp2, tmp2, 16);
6468 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6469 neon_store_reg(rd, 3, tmp2);
6470 tcg_temp_free_i32(ahp);
6471 tcg_temp_free_ptr(fpst);
6474 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6475 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
6478 ptr1 = vfp_reg_ptr(true, rd);
6479 ptr2 = vfp_reg_ptr(true, rm);
6481 /* Bit 6 is the lowest opcode bit; it distinguishes between
6482 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6484 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6486 if (op == NEON_2RM_AESE) {
6487 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
6489 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
6491 tcg_temp_free_ptr(ptr1);
6492 tcg_temp_free_ptr(ptr2);
6493 tcg_temp_free_i32(tmp3);
6495 case NEON_2RM_SHA1H:
6496 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
6499 ptr1 = vfp_reg_ptr(true, rd);
6500 ptr2 = vfp_reg_ptr(true, rm);
6502 gen_helper_crypto_sha1h(ptr1, ptr2);
6504 tcg_temp_free_ptr(ptr1);
6505 tcg_temp_free_ptr(ptr2);
6507 case NEON_2RM_SHA1SU1:
6508 if ((rm | rd) & 1) {
6511 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6513 if (!dc_isar_feature(aa32_sha2, s)) {
6516 } else if (!dc_isar_feature(aa32_sha1, s)) {
6519 ptr1 = vfp_reg_ptr(true, rd);
6520 ptr2 = vfp_reg_ptr(true, rm);
6522 gen_helper_crypto_sha256su0(ptr1, ptr2);
6524 gen_helper_crypto_sha1su1(ptr1, ptr2);
6526 tcg_temp_free_ptr(ptr1);
6527 tcg_temp_free_ptr(ptr2);
6531 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6534 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6537 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
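/* The remaining two-register-misc ops are handled per 32-bit element
 * in the loop below.
 */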
6542 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6543 tmp = neon_load_reg(rm, pass);
6545 case NEON_2RM_VREV32:
6547 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6548 case 1: gen_swap_half(tmp); break;
6552 case NEON_2RM_VREV16:
6557 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6558 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6559 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6565 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6566 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6567 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
6572 gen_helper_neon_cnt_u8(tmp, tmp);
6574 case NEON_2RM_VQABS:
6577 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6580 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6583 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6588 case NEON_2RM_VQNEG:
6591 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6594 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6597 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6602 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6603 tmp2 = tcg_const_i32(0);
6605 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6606 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6607 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6610 tcg_temp_free_i32(tmp2);
6611 if (op == NEON_2RM_VCLE0) {
6612 tcg_gen_not_i32(tmp, tmp);
6615 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6616 tmp2 = tcg_const_i32(0);
6618 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6619 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6620 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6623 tcg_temp_free_i32(tmp2);
6624 if (op == NEON_2RM_VCLT0) {
6625 tcg_gen_not_i32(tmp, tmp);
6628 case NEON_2RM_VCEQ0:
6629 tmp2 = tcg_const_i32(0);
6631 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6632 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6633 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6636 tcg_temp_free_i32(tmp2);
6638 case NEON_2RM_VCGT0_F:
6640 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6641 tmp2 = tcg_const_i32(0);
6642 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6643 tcg_temp_free_i32(tmp2);
6644 tcg_temp_free_ptr(fpstatus);
6647 case NEON_2RM_VCGE0_F:
6649 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6650 tmp2 = tcg_const_i32(0);
6651 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6652 tcg_temp_free_i32(tmp2);
6653 tcg_temp_free_ptr(fpstatus);
6656 case NEON_2RM_VCEQ0_F:
6658 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6659 tmp2 = tcg_const_i32(0);
6660 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6661 tcg_temp_free_i32(tmp2);
6662 tcg_temp_free_ptr(fpstatus);
6665 case NEON_2RM_VCLE0_F:
6667 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6668 tmp2 = tcg_const_i32(0);
6669 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6670 tcg_temp_free_i32(tmp2);
6671 tcg_temp_free_ptr(fpstatus);
6674 case NEON_2RM_VCLT0_F:
6676 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6677 tmp2 = tcg_const_i32(0);
6678 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6679 tcg_temp_free_i32(tmp2);
6680 tcg_temp_free_ptr(fpstatus);
6683 case NEON_2RM_VABS_F:
6684 gen_helper_vfp_abss(tmp, tmp);
6686 case NEON_2RM_VNEG_F:
6687 gen_helper_vfp_negs(tmp, tmp);
6690 tmp2 = neon_load_reg(rd, pass);
6691 neon_store_reg(rm, pass, tmp2);
6694 tmp2 = neon_load_reg(rd, pass);
6696 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6697 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6700 neon_store_reg(rm, pass, tmp2);
6702 case NEON_2RM_VRINTN:
6703 case NEON_2RM_VRINTA:
6704 case NEON_2RM_VRINTM:
6705 case NEON_2RM_VRINTP:
6706 case NEON_2RM_VRINTZ:
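/* VRINTN/A/M/P/Z: round to integral in floating-point; VRINTZ forces
 * round-towards-zero, the others take their rounding mode from the op.
 */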
6709 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6712 if (op == NEON_2RM_VRINTZ) {
6713 rmode = FPROUNDING_ZERO;
6715 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6718 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6719 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6721 gen_helper_rints(tmp, tmp, fpstatus);
6722 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6724 tcg_temp_free_ptr(fpstatus);
6725 tcg_temp_free_i32(tcg_rmode);
6728 case NEON_2RM_VRINTX:
6730 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6731 gen_helper_rints_exact(tmp, tmp, fpstatus);
6732 tcg_temp_free_ptr(fpstatus);
6735 case NEON_2RM_VCVTAU:
6736 case NEON_2RM_VCVTAS:
6737 case NEON_2RM_VCVTNU:
6738 case NEON_2RM_VCVTNS:
6739 case NEON_2RM_VCVTPU:
6740 case NEON_2RM_VCVTPS:
6741 case NEON_2RM_VCVTMU:
6742 case NEON_2RM_VCVTMS:
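/* VCVTA/VCVTN/VCVTP/VCVTM: float-to-integer conversion using the
 * rounding mode encoded in the insn rather than the FPSCR one.
 */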
6744 bool is_signed = !extract32(insn, 7, 1);
6745 TCGv_ptr fpst = get_fpstatus_ptr(1);
6746 TCGv_i32 tcg_rmode, tcg_shift;
6747 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6749 tcg_shift = tcg_const_i32(0);
6750 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6751 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6755 gen_helper_vfp_tosls(tmp, tmp,
6758 gen_helper_vfp_touls(tmp, tmp,
6762 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6764 tcg_temp_free_i32(tcg_rmode);
6765 tcg_temp_free_i32(tcg_shift);
6766 tcg_temp_free_ptr(fpst);
6769 case NEON_2RM_VRECPE:
6771 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6772 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6773 tcg_temp_free_ptr(fpstatus);
6776 case NEON_2RM_VRSQRTE:
6778 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6779 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6780 tcg_temp_free_ptr(fpstatus);
6783 case NEON_2RM_VRECPE_F:
6785 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6786 gen_helper_recpe_f32(tmp, tmp, fpstatus);
6787 tcg_temp_free_ptr(fpstatus);
6790 case NEON_2RM_VRSQRTE_F:
6792 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6793 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
6794 tcg_temp_free_ptr(fpstatus);
6797 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6800 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6801 tcg_temp_free_ptr(fpstatus);
6804 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6806 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6807 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6808 tcg_temp_free_ptr(fpstatus);
6811 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6813 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6814 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6815 tcg_temp_free_ptr(fpstatus);
6818 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6820 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6821 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6822 tcg_temp_free_ptr(fpstatus);
6826 /* Reserved op values were caught by the
6827 * neon_2rm_sizes[] check earlier.
6831 neon_store_reg(rd, pass, tmp);
6835 } else if ((insn & (1 << 10)) == 0) {
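/* VTBL, VTBX: table lookup using one to four consecutive registers
 * starting at Vn; bit 6 selects VTBX (out-of-range indexes leave the
 * destination unchanged) versus VTBL (they produce zero).
 */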
6837 int n = ((insn >> 8) & 3) + 1;
6838 if ((rn + n) > 32) {
6839 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6840 * helper function running off the end of the register file.
6845 if (insn & (1 << 6)) {
6846 tmp = neon_load_reg(rd, 0);
6848 tmp = tcg_temp_new_i32();
6849 tcg_gen_movi_i32(tmp, 0);
6851 tmp2 = neon_load_reg(rm, 0);
6852 ptr1 = vfp_reg_ptr(true, rn);
6853 tmp5 = tcg_const_i32(n);
6854 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
6855 tcg_temp_free_i32(tmp);
6856 if (insn & (1 << 6)) {
6857 tmp = neon_load_reg(rd, 1);
6859 tmp = tcg_temp_new_i32();
6860 tcg_gen_movi_i32(tmp, 0);
6862 tmp3 = neon_load_reg(rm, 1);
6863 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
6864 tcg_temp_free_i32(tmp5);
6865 tcg_temp_free_ptr(ptr1);
6866 neon_store_reg(rd, 0, tmp2);
6867 neon_store_reg(rd, 1, tmp3);
6868 tcg_temp_free_i32(tmp);
6869 } else if ((insn & 0x380) == 0) {
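/* VDUP (scalar): replicate a single element of the source register
 * across the whole destination.
 */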
6874 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6877 if (insn & (1 << 16)) {
6879 element = (insn >> 17) & 7;
6880 } else if (insn & (1 << 17)) {
6882 element = (insn >> 18) & 3;
6885 element = (insn >> 19) & 1;
6887 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6888 neon_element_offset(rm, element, size),
6889 q ? 16 : 8, q ? 16 : 8);
6898 /* Advanced SIMD three registers of the same length extension.
6899 * 31 25 23 22 20 16 12 11 10 9 8 3 0
6900 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6901 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
6902 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6904 static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
6906 gen_helper_gvec_3 *fn_gvec = NULL;
6907 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
6908 int rd, rn, rm, opr_sz;
6911 bool is_long = false, q = extract32(insn, 6, 1);
6912 bool ptr_is_env = false;
6914 if ((insn & 0xfe200f10) == 0xfc200800) {
6915 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
6916 int size = extract32(insn, 20, 1);
6917 data = extract32(insn, 23, 2); /* rot */
6918 if (!dc_isar_feature(aa32_vcma, s)
6919 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
6922 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
6923 } else if ((insn & 0xfea00f10) == 0xfc800800) {
6924 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
6925 int size = extract32(insn, 20, 1);
6926 data = extract32(insn, 24, 1); /* rot */
6927 if (!dc_isar_feature(aa32_vcma, s)
6928 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
6931 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
6932 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
6933 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
6934 bool u = extract32(insn, 4, 1);
6935 if (!dc_isar_feature(aa32_dp, s)) {
6938 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
6939 } else if ((insn & 0xff300f10) == 0xfc200810) {
6940 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
6941 int is_s = extract32(insn, 23, 1);
6942 if (!dc_isar_feature(aa32_fhm, s)) {
6946 data = is_s; /* is_2 == 0 */
6947 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
6953 VFP_DREG_D(rd, insn);
6957 if (q || !is_long) {
6958 VFP_DREG_N(rn, insn);
6959 VFP_DREG_M(rm, insn);
6960 if ((rn | rm) & q & !is_long) {
6963 off_rn = vfp_reg_offset(1, rn);
6964 off_rm = vfp_reg_offset(1, rm);
6966 rn = VFP_SREG_N(insn);
6967 rm = VFP_SREG_M(insn);
6968 off_rn = vfp_reg_offset(0, rn);
6969 off_rm = vfp_reg_offset(0, rm);
6972 if (s->fp_excp_el) {
6973 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
6974 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
6977 if (!s->vfp_enabled) {
6981 opr_sz = (1 + q) * 8;
6987 ptr = get_fpstatus_ptr(1);
6989 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
6990 opr_sz, opr_sz, data, fn_gvec_ptr);
6992 tcg_temp_free_ptr(ptr);
6995 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
6996 opr_sz, opr_sz, data, fn_gvec);
7001 /* Advanced SIMD two registers and a scalar extension.
7002 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7003 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7004 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7005 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7009 static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7011 gen_helper_gvec_3 *fn_gvec = NULL;
7012 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7013 int rd, rn, rm, opr_sz, data;
7015 bool is_long = false, q = extract32(insn, 6, 1);
7016 bool ptr_is_env = false;
7018 if ((insn & 0xff000f10) == 0xfe000800) {
7019 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
7020 int rot = extract32(insn, 20, 2);
7021 int size = extract32(insn, 23, 1);
7024 if (!dc_isar_feature(aa32_vcma, s)) {
7028 if (!dc_isar_feature(aa32_fp16_arith, s)) {
7031 /* For fp16, rm is just Vm, and index is M. */
7032 rm = extract32(insn, 0, 4);
7033 index = extract32(insn, 5, 1);
7035 /* For fp32, rm is the usual M:Vm, and index is 0. */
7036 VFP_DREG_M(rm, insn);
7039 data = (index << 2) | rot;
7040 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7041 : gen_helper_gvec_fcmlah_idx);
7042 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7043 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7044 int u = extract32(insn, 4, 1);
7046 if (!dc_isar_feature(aa32_dp, s)) {
7049 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7050 /* rm is just Vm, and index is M. */
7051 data = extract32(insn, 5, 1); /* index */
7052 rm = extract32(insn, 0, 4);
7053 } else if ((insn & 0xffa00f10) == 0xfe000810) {
7054 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
7055 int is_s = extract32(insn, 20, 1);
7056 int vm20 = extract32(insn, 0, 3);
7057 int vm3 = extract32(insn, 3, 1);
7058 int m = extract32(insn, 5, 1);
7061 if (!dc_isar_feature(aa32_fhm, s)) {
7066 index = m * 2 + vm3;
7072 data = (index << 2) | is_s; /* is_2 == 0 */
7073 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
7079 VFP_DREG_D(rd, insn);
7083 if (q || !is_long) {
7084 VFP_DREG_N(rn, insn);
7085 if (rn & q & !is_long) {
7088 off_rn = vfp_reg_offset(1, rn);
7089 off_rm = vfp_reg_offset(1, rm);
7091 rn = VFP_SREG_N(insn);
7092 off_rn = vfp_reg_offset(0, rn);
7093 off_rm = vfp_reg_offset(0, rm);
7095 if (s->fp_excp_el) {
7096 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
7097 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
7100 if (!s->vfp_enabled) {
7104 opr_sz = (1 + q) * 8;
7110 ptr = get_fpstatus_ptr(1);
7112 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
7113 opr_sz, opr_sz, data, fn_gvec_ptr);
7115 tcg_temp_free_ptr(ptr);
7118 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
7119 opr_sz, opr_sz, data, fn_gvec);
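/* Decode a coprocessor / system register access (MCR, MRC, MCRR, MRRC,
 * plus the XScale/iwMMXt coprocessor spaces). Returns nonzero if the
 * access is not recognized or not permitted.
 */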
7124 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7126 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7127 const ARMCPRegInfo *ri;
7129 cpnum = (insn >> 8) & 0xf;
7131 /* First check for coprocessor space used for XScale/iwMMXt insns */
7132 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7133 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7136 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7137 return disas_iwmmxt_insn(s, insn);
7138 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7139 return disas_dsp_insn(s, insn);
7144 /* Otherwise treat as a generic register access */
7145 is64 = (insn & (1 << 25)) == 0;
7146 if (!is64 && ((insn & (1 << 4)) == 0)) {
7154 opc1 = (insn >> 4) & 0xf;
7156 rt2 = (insn >> 16) & 0xf;
7158 crn = (insn >> 16) & 0xf;
7159 opc1 = (insn >> 21) & 7;
7160 opc2 = (insn >> 5) & 7;
7163 isread = (insn >> 20) & 1;
7164 rt = (insn >> 12) & 0xf;
7166 ri = get_arm_cp_reginfo(s->cp_regs,
7167 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7169 /* Check access permissions */
7170 if (!cp_access_ok(s->current_el, ri, isread)) {
7175 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7176 /* Emit code to perform further access permissions checks at
7177 * runtime; this may result in an exception.
7178 * Note that on XScale all cp0..c13 registers do an access check
7179 * call in order to handle c15_cpar.
7182 TCGv_i32 tcg_syn, tcg_isread;
7185 /* Note that since we are an implementation which takes an
7186 * exception on a trapped conditional instruction only if the
7187 * instruction passes its condition code check, we can take
7188 * advantage of the clause in the ARM ARM that allows us to set
7189 * the COND field in the instruction to 0xE in all cases.
7190 * We could fish the actual condition out of the insn (ARM)
7191 * or the condexec bits (Thumb) but it isn't necessary.
7196 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7199 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7205 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7208 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
/* ARMv8 defines that only coprocessors 14 and 15 exist,
 * so this can only happen if this is an ARMv7 or earlier CPU,
 * in which case the syndrome information won't actually be
 * guest visible.
 */
7218 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7219 syndrome = syn_uncategorized();
7223 gen_set_condexec(s);
7224 gen_set_pc_im(s, s->pc_curr);
7225 tmpptr = tcg_const_ptr(ri);
7226 tcg_syn = tcg_const_i32(syndrome);
7227 tcg_isread = tcg_const_i32(isread);
7228 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7230 tcg_temp_free_ptr(tmpptr);
7231 tcg_temp_free_i32(tcg_syn);
7232 tcg_temp_free_i32(tcg_isread);
7235 /* Handle special cases first */
7236 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7243 gen_set_pc_im(s, s->base.pc_next);
7244 s->base.is_jmp = DISAS_WFI;
7250 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7259 if (ri->type & ARM_CP_CONST) {
7260 tmp64 = tcg_const_i64(ri->resetvalue);
7261 } else if (ri->readfn) {
7263 tmp64 = tcg_temp_new_i64();
7264 tmpptr = tcg_const_ptr(ri);
7265 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7266 tcg_temp_free_ptr(tmpptr);
7268 tmp64 = tcg_temp_new_i64();
7269 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7271 tmp = tcg_temp_new_i32();
7272 tcg_gen_extrl_i64_i32(tmp, tmp64);
7273 store_reg(s, rt, tmp);
7274 tcg_gen_shri_i64(tmp64, tmp64, 32);
7275 tmp = tcg_temp_new_i32();
7276 tcg_gen_extrl_i64_i32(tmp, tmp64);
7277 tcg_temp_free_i64(tmp64);
7278 store_reg(s, rt2, tmp);
7281 if (ri->type & ARM_CP_CONST) {
7282 tmp = tcg_const_i32(ri->resetvalue);
7283 } else if (ri->readfn) {
7285 tmp = tcg_temp_new_i32();
7286 tmpptr = tcg_const_ptr(ri);
7287 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7288 tcg_temp_free_ptr(tmpptr);
7290 tmp = load_cpu_offset(ri->fieldoffset);
7293 /* Destination register of r15 for 32 bit loads sets
7294 * the condition codes from the high 4 bits of the value
7297 tcg_temp_free_i32(tmp);
7299 store_reg(s, rt, tmp);
7304 if (ri->type & ARM_CP_CONST) {
7305 /* If not forbidden by access permissions, treat as WI */
7310 TCGv_i32 tmplo, tmphi;
7311 TCGv_i64 tmp64 = tcg_temp_new_i64();
7312 tmplo = load_reg(s, rt);
7313 tmphi = load_reg(s, rt2);
7314 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7315 tcg_temp_free_i32(tmplo);
7316 tcg_temp_free_i32(tmphi);
7318 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7319 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7320 tcg_temp_free_ptr(tmpptr);
7322 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7324 tcg_temp_free_i64(tmp64);
7329 tmp = load_reg(s, rt);
7330 tmpptr = tcg_const_ptr(ri);
7331 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7332 tcg_temp_free_ptr(tmpptr);
7333 tcg_temp_free_i32(tmp);
7335 TCGv_i32 tmp = load_reg(s, rt);
7336 store_cpu_offset(tmp, ri->fieldoffset);
7341 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7342 /* I/O operations must end the TB here (whether read or write) */
7345 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7346 /* We default to ending the TB on a coprocessor register write,
7347 * but allow this to be suppressed by the register definition
7348 * (usually only necessary to work around guest bugs).
7356 /* Unknown register; this might be a guest error or a QEMU
7357 * unimplemented feature.
7360 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7361 "64 bit system register cp:%d opc1: %d crm:%d "
7363 isread ? "read" : "write", cpnum, opc1, crm,
7364 s->ns ? "non-secure" : "secure");
7366 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7367 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7369 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7370 s->ns ? "non-secure" : "secure");
7377 /* Store a 64-bit value to a register pair. Clobbers val. */
7378 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7381 tmp = tcg_temp_new_i32();
7382 tcg_gen_extrl_i64_i32(tmp, val);
7383 store_reg(s, rlow, tmp);
7384 tmp = tcg_temp_new_i32();
7385 tcg_gen_shri_i64(val, val, 32);
7386 tcg_gen_extrl_i64_i32(tmp, val);
7387 store_reg(s, rhigh, tmp);
7390 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7391 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7396 /* Load value and extend to 64 bits. */
7397 tmp = tcg_temp_new_i64();
7398 tmp2 = load_reg(s, rlow);
7399 tcg_gen_extu_i32_i64(tmp, tmp2);
7400 tcg_temp_free_i32(tmp2);
7401 tcg_gen_add_i64(val, val, tmp);
7402 tcg_temp_free_i64(tmp);
7405 /* load and add a 64-bit value from a register pair. */
7406 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7412 /* Load 64-bit value rd:rn. */
7413 tmpl = load_reg(s, rlow);
7414 tmph = load_reg(s, rhigh);
7415 tmp = tcg_temp_new_i64();
7416 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7417 tcg_temp_free_i32(tmpl);
7418 tcg_temp_free_i32(tmph);
7419 tcg_gen_add_i64(val, val, tmp);
7420 tcg_temp_free_i64(tmp);
7423 /* Set N and Z flags from hi|lo. */
7424 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7426 tcg_gen_mov_i32(cpu_NF, hi);
7427 tcg_gen_or_i32(cpu_ZF, lo, hi);
7430 /* Load/Store exclusive instructions are implemented by remembering
7431 the value/address loaded, and seeing if these are the same
7432 when the store is performed. This should be sufficient to implement
7433 the architecturally mandated semantics, and avoids having to monitor
7434 regular stores. The compare vs the remembered value is done during
7435 the cmpxchg operation, but we must compare the addresses manually. */
7436 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7437 TCGv_i32 addr, int size)
7439 TCGv_i32 tmp = tcg_temp_new_i32();
7440 TCGMemOp opc = size | MO_ALIGN | s->be_data;
7445 TCGv_i32 tmp2 = tcg_temp_new_i32();
7446 TCGv_i64 t64 = tcg_temp_new_i64();
7448 /* For AArch32, architecturally the 32-bit word at the lowest
7449 * address is always Rt and the one at addr+4 is Rt2, even if
7450 * the CPU is big-endian. That means we don't want to do a
7451 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7452 * for an architecturally 64-bit access, but instead do a
* 64-bit access using MO_BE if appropriate and then split
* the two halves.
7455 * This only makes a difference for BE32 user-mode, where
7456 * frob64() must not flip the two halves of the 64-bit data
7457 * but this code must treat BE32 user-mode like BE32 system.
7459 TCGv taddr = gen_aa32_addr(s, addr, opc);
7461 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7462 tcg_temp_free(taddr);
7463 tcg_gen_mov_i64(cpu_exclusive_val, t64);
7464 if (s->be_data == MO_BE) {
7465 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7467 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7469 tcg_temp_free_i64(t64);
7471 store_reg(s, rt2, tmp2);
7473 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
7474 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7477 store_reg(s, rt, tmp);
7478 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
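/* At this point cpu_exclusive_addr and cpu_exclusive_val record the
 * address and data observed by the exclusive load; gen_store_exclusive()
 * compares against both before allowing the paired store to succeed.
 * For the doubleword case, the word at the lower address always goes to
 * Rt regardless of endianness, as explained in the comment above.
 */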
7481 static void gen_clrex(DisasContext *s)
7483 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7486 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7487 TCGv_i32 addr, int size)
7489 TCGv_i32 t0, t1, t2;
7492 TCGLabel *done_label;
7493 TCGLabel *fail_label;
7494 TCGMemOp opc = size | MO_ALIGN | s->be_data;
7496 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { [addr] = {Rt}; {Rd} = 0; } else { {Rd} = 1; } */
7502 fail_label = gen_new_label();
7503 done_label = gen_new_label();
7504 extaddr = tcg_temp_new_i64();
7505 tcg_gen_extu_i32_i64(extaddr, addr);
7506 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7507 tcg_temp_free_i64(extaddr);
7509 taddr = gen_aa32_addr(s, addr, opc);
7510 t0 = tcg_temp_new_i32();
7511 t1 = load_reg(s, rt);
7513 TCGv_i64 o64 = tcg_temp_new_i64();
7514 TCGv_i64 n64 = tcg_temp_new_i64();
7516 t2 = load_reg(s, rt2);
7517 /* For AArch32, architecturally the 32-bit word at the lowest
7518 * address is always Rt and the one at addr+4 is Rt2, even if
7519 * the CPU is big-endian. Since we're going to treat this as a
7520 * single 64-bit BE store, we need to put the two halves in the
7521 * opposite order for BE to LE, so that they end up in the right places.
7523 * We don't want gen_aa32_frob64() because that does the wrong
7524 * thing for BE32 usermode.
7526 if (s->be_data == MO_BE) {
7527 tcg_gen_concat_i32_i64(n64, t2, t1);
7529 tcg_gen_concat_i32_i64(n64, t1, t2);
7531 tcg_temp_free_i32(t2);
7533 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7534 get_mem_index(s), opc);
7535 tcg_temp_free_i64(n64);
7537 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7538 tcg_gen_extrl_i64_i32(t0, o64);
7540 tcg_temp_free_i64(o64);
7542 t2 = tcg_temp_new_i32();
7543 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7544 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7545 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7546 tcg_temp_free_i32(t2);
7548 tcg_temp_free_i32(t1);
7549 tcg_temp_free(taddr);
7550 tcg_gen_mov_i32(cpu_R[rd], t0);
7551 tcg_temp_free_i32(t0);
7552 tcg_gen_br(done_label);
7554 gen_set_label(fail_label);
7555 tcg_gen_movi_i32(cpu_R[rd], 1);
7556 gen_set_label(done_label);
7557 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
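/* Whichever path was taken, the local exclusive monitor is cleared by
 * setting cpu_exclusive_addr back to -1 (the same thing gen_clrex()
 * does), and Rd holds 0 on success or 1 on failure, matching the
 * pseudocode comment at the top of this function.
 */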
7563 * @mode: mode field from insn (which stack to store to)
7564 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7565 * @writeback: true if writeback bit set
7567 * Generate code for the SRS (Store Return State) insn.
7569 static void gen_srs(DisasContext *s,
7570 uint32_t mode, uint32_t amode, bool writeback)
7577 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7578 * and specified mode is monitor mode
7579 * - UNDEFINED in Hyp mode
7580 * - UNPREDICTABLE in User or System mode
7581 * - UNPREDICTABLE if the specified mode is:
7582 * -- not implemented
7583 * -- not a valid mode number
7584 * -- a mode that's at a higher exception level
7585 * -- Monitor, if we are Non-secure
7586 * For the UNPREDICTABLE cases we choose to UNDEF.
7588 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7589 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
7593 if (s->current_el == 0 || s->current_el == 2) {
7598 case ARM_CPU_MODE_USR:
7599 case ARM_CPU_MODE_FIQ:
7600 case ARM_CPU_MODE_IRQ:
7601 case ARM_CPU_MODE_SVC:
7602 case ARM_CPU_MODE_ABT:
7603 case ARM_CPU_MODE_UND:
7604 case ARM_CPU_MODE_SYS:
7606 case ARM_CPU_MODE_HYP:
7607 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7611 case ARM_CPU_MODE_MON:
7612 /* No need to check specifically for "are we non-secure" because
7613 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7614 * so if this isn't EL3 then we must be non-secure.
7616 if (s->current_el != 3) {
7625 unallocated_encoding(s);
7629 addr = tcg_temp_new_i32();
7630 tmp = tcg_const_i32(mode);
7631 /* get_r13_banked() will raise an exception if called from System mode */
7632 gen_set_condexec(s);
7633 gen_set_pc_im(s, s->pc_curr);
7634 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7635 tcg_temp_free_i32(tmp);
7652 tcg_gen_addi_i32(addr, addr, offset);
7653 tmp = load_reg(s, 14);
7654 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7655 tcg_temp_free_i32(tmp);
7656 tmp = load_cpu_field(spsr);
7657 tcg_gen_addi_i32(addr, addr, 4);
7658 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7659 tcg_temp_free_i32(tmp);
7677 tcg_gen_addi_i32(addr, addr, offset);
7678 tmp = tcg_const_i32(mode);
7679 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7680 tcg_temp_free_i32(tmp);
7682 tcg_temp_free_i32(addr);
7683 s->base.is_jmp = DISAS_UPDATE;
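/* Summarising the generated stores (the exact offset depends on the
 * DA/IA/DB/IB addressing mode decoded above), roughly:
 *   [SP_<mode> + offset]     = LR
 *   [SP_<mode> + offset + 4] = SPSR
 * optionally followed by writeback of the banked SP_<mode>.
 */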
7686 /* Generate a label used for skipping this instruction */
7687 static void arm_gen_condlabel(DisasContext *s)
7690 s->condlabel = gen_new_label();
7695 /* Skip this instruction if the ARM condition is false */
7696 static void arm_skip_unless(DisasContext *s, uint32_t cond)
7698 arm_gen_condlabel(s);
7699 arm_gen_test_cc(cond ^ 1, s->condlabel);
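/* Sketch of how conditional execution is handled: the call above emits
 *   if (!cond) goto s->condlabel;
 * the caller then emits the instruction body unconditionally, and the
 * translator binds s->condlabel once the insn has been emitted, so a
 * false condition skips the whole body.
 */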
7702 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7704 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7711 /* M variants do not implement ARM mode; this must raise the INVSTATE
7712 * UsageFault exception.
7714 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7715 gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
7716 default_exception_el(s));
7721 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7722 * choose to UNDEF. In ARMv5 and above the space is used
7723 * for miscellaneous unconditional instructions.
7727 /* Unconditional instructions. */
7728 if (((insn >> 25) & 7) == 1) {
7729 /* NEON Data processing. */
7730 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7734 if (disas_neon_data_insn(s, insn)) {
7739 if ((insn & 0x0f100000) == 0x04000000) {
7740 /* NEON load/store. */
7741 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7745 if (disas_neon_ls_insn(s, insn)) {
7750 if ((insn & 0x0f000e10) == 0x0e000a00) {
7752 if (disas_vfp_insn(s, insn)) {
7757 if (((insn & 0x0f30f000) == 0x0510f000) ||
7758 ((insn & 0x0f30f010) == 0x0710f000)) {
7759 if ((insn & (1 << 22)) == 0) {
7761 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7765 /* Otherwise PLD; v5TE+ */
7769 if (((insn & 0x0f70f000) == 0x0450f000) ||
7770 ((insn & 0x0f70f010) == 0x0650f000)) {
7772 return; /* PLI; V7 */
7774 if (((insn & 0x0f700000) == 0x04100000) ||
7775 ((insn & 0x0f700010) == 0x06100000)) {
7776 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7779 return; /* v7MP: Unallocated memory hint: must NOP */
7782 if ((insn & 0x0ffffdff) == 0x01010000) {
7785 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
7786 gen_helper_setend(cpu_env);
7787 s->base.is_jmp = DISAS_UPDATE;
7790 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7791 switch ((insn >> 4) & 0xf) {
7799 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7802 /* We need to break the TB after this insn to execute
7803 * self-modifying code correctly and also to take
7804 * any pending interrupts immediately.
7806 gen_goto_tb(s, 0, s->base.pc_next);
7809 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
7813 * TODO: There is no speculation barrier opcode
7814 * for TCG; MB and end the TB instead.
7816 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7817 gen_goto_tb(s, 0, s->base.pc_next);
7822 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7825 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7827 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7833 rn = (insn >> 16) & 0xf;
7834 addr = load_reg(s, rn);
7835 i = (insn >> 23) & 3;
7837 case 0: offset = -4; break; /* DA */
7838 case 1: offset = 0; break; /* IA */
7839 case 2: offset = -8; break; /* DB */
7840 case 3: offset = 4; break; /* IB */
7844 tcg_gen_addi_i32(addr, addr, offset);
7845 /* Load PC into tmp and CPSR into tmp2. */
7846 tmp = tcg_temp_new_i32();
7847 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7848 tcg_gen_addi_i32(addr, addr, 4);
7849 tmp2 = tcg_temp_new_i32();
7850 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
7851 if (insn & (1 << 21)) {
7852 /* Base writeback. */
7854 case 0: offset = -8; break;
7855 case 1: offset = 4; break;
7856 case 2: offset = -4; break;
7857 case 3: offset = 0; break;
7861 tcg_gen_addi_i32(addr, addr, offset);
7862 store_reg(s, rn, addr);
7864 tcg_temp_free_i32(addr);
7866 gen_rfe(s, tmp, tmp2);
7868 } else if ((insn & 0x0e000000) == 0x0a000000) {
7869 /* branch link and change to thumb (blx <offset>) */
7872 tmp = tcg_temp_new_i32();
7873 tcg_gen_movi_i32(tmp, s->base.pc_next);
7874 store_reg(s, 14, tmp);
7875 /* Sign-extend the 24-bit offset */
7876 offset = (((int32_t)insn) << 8) >> 8;
7878 /* offset * 4 + bit24 * 2 + (thumb bit) */
7879 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7880 /* protected by ARCH(5); above, near the start of uncond block */
7883 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7884 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7885 /* iWMMXt register transfer. */
7886 if (extract32(s->c15_cpar, 1, 1)) {
7887 if (!disas_iwmmxt_insn(s, insn)) {
7892 } else if ((insn & 0x0e000a00) == 0x0c000800
7893 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7894 if (disas_neon_insn_3same_ext(s, insn)) {
7898 } else if ((insn & 0x0f000a00) == 0x0e000800
7899 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7900 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
7904 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7905 /* Coprocessor double register transfer. */
7907 } else if ((insn & 0x0f000010) == 0x0e000010) {
7908 /* Additional coprocessor register transfer. */
7909 } else if ((insn & 0x0ff10020) == 0x01000000) {
7912 /* cps (privileged) */
7916 if (insn & (1 << 19)) {
7917 if (insn & (1 << 8))
7919 if (insn & (1 << 7))
7921 if (insn & (1 << 6))
7923 if (insn & (1 << 18))
7926 if (insn & (1 << 17)) {
7928 val |= (insn & 0x1f);
7931 gen_set_psr_im(s, mask, 0, val);
7938 /* If the condition is not AL (always), we generate a conditional jump to the next instruction. */
7940 arm_skip_unless(s, cond);
7942 if ((insn & 0x0f900000) == 0x03000000) {
7943 if ((insn & (1 << 21)) == 0) {
7945 rd = (insn >> 12) & 0xf;
7946 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7947 if ((insn & (1 << 22)) == 0) {
7949 tmp = tcg_temp_new_i32();
7950 tcg_gen_movi_i32(tmp, val);
7953 tmp = load_reg(s, rd);
7954 tcg_gen_ext16u_i32(tmp, tmp);
7955 tcg_gen_ori_i32(tmp, tmp, val << 16);
7957 store_reg(s, rd, tmp);
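/* Example of the immediate assembly above: MOVW r0, #0x1234 encodes
 * imm4 = 0x1 (bits [19:16]) and imm12 = 0x234 (bits [11:0]), so
 * val = ((insn >> 4) & 0xf000) | (insn & 0xfff) reconstructs 0x1234;
 * the MOVT path instead keeps the low halfword of Rd and ORs val into
 * the top halfword.
 */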
7959 if (((insn >> 12) & 0xf) != 0xf)
7961 if (((insn >> 16) & 0xf) == 0) {
7962 gen_nop_hint(s, insn & 0xff);
7964 /* CPSR = immediate */
7966 shift = ((insn >> 8) & 0xf) * 2;
7968 val = (val >> shift) | (val << (32 - shift));
7969 i = ((insn & (1 << 22)) != 0);
7970 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7976 } else if ((insn & 0x0f900000) == 0x01000000
7977 && (insn & 0x00000090) != 0x00000090) {
7978 /* miscellaneous instructions */
7979 op1 = (insn >> 21) & 3;
7980 sh = (insn >> 4) & 0xf;
7983 case 0x0: /* MSR, MRS */
7984 if (insn & (1 << 9)) {
7985 /* MSR (banked) and MRS (banked) */
7986 int sysm = extract32(insn, 16, 4) |
7987 (extract32(insn, 8, 1) << 4);
7988 int r = extract32(insn, 22, 1);
7992 gen_msr_banked(s, r, sysm, rm);
7995 int rd = extract32(insn, 12, 4);
7997 gen_mrs_banked(s, r, sysm, rd);
8002 /* MSR, MRS (for PSRs) */
8005 tmp = load_reg(s, rm);
8006 i = ((op1 & 2) != 0);
8007 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8011 rd = (insn >> 12) & 0xf;
8015 tmp = load_cpu_field(spsr);
8017 tmp = tcg_temp_new_i32();
8018 gen_helper_cpsr_read(tmp, cpu_env);
8020 store_reg(s, rd, tmp);
8025 /* branch/exchange thumb (bx). */
8027 tmp = load_reg(s, rm);
8029 } else if (op1 == 3) {
8032 rd = (insn >> 12) & 0xf;
8033 tmp = load_reg(s, rm);
8034 tcg_gen_clzi_i32(tmp, tmp, 32);
8035 store_reg(s, rd, tmp);
8043 /* Trivial implementation equivalent to bx. */
8044 tmp = load_reg(s, rm);
8055 /* branch link/exchange thumb (blx) */
8056 tmp = load_reg(s, rm);
8057 tmp2 = tcg_temp_new_i32();
8058 tcg_gen_movi_i32(tmp2, s->base.pc_next);
8059 store_reg(s, 14, tmp2);
8065 uint32_t c = extract32(insn, 8, 4);
8067 /* Check that this CPU supports ARMv8 CRC instructions.
8068 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8069 * Bits 8, 10 and 11 should be zero.
8071 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
8075 rn = extract32(insn, 16, 4);
8076 rd = extract32(insn, 12, 4);
8078 tmp = load_reg(s, rn);
8079 tmp2 = load_reg(s, rm);
8081 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8082 } else if (op1 == 1) {
8083 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8085 tmp3 = tcg_const_i32(1 << op1);
8087 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8089 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8091 tcg_temp_free_i32(tmp2);
8092 tcg_temp_free_i32(tmp3);
8093 store_reg(s, rd, tmp);
8096 case 0x5: /* saturating add/subtract */
8098 rd = (insn >> 12) & 0xf;
8099 rn = (insn >> 16) & 0xf;
8100 tmp = load_reg(s, rm);
8101 tmp2 = load_reg(s, rn);
8103 gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
8105 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8107 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8108 tcg_temp_free_i32(tmp2);
8109 store_reg(s, rd, tmp);
8111 case 0x6: /* ERET */
8115 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8118 if ((insn & 0x000fff0f) != 0x0000000e) {
8119 /* UNPREDICTABLE; we choose to UNDEF */
8123 if (s->current_el == 2) {
8124 tmp = load_cpu_field(elr_el[2]);
8126 tmp = load_reg(s, 14);
8128 gen_exception_return(s, tmp);
8132 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8141 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
8144 /* Hypervisor call (v7) */
8152 /* Secure monitor call (v6+) */
8160 g_assert_not_reached();
8164 case 0x8: /* signed multiply */
8169 rs = (insn >> 8) & 0xf;
8170 rn = (insn >> 12) & 0xf;
8171 rd = (insn >> 16) & 0xf;
8173 /* (32 * 16) >> 16 */
8174 tmp = load_reg(s, rm);
8175 tmp2 = load_reg(s, rs);
8177 tcg_gen_sari_i32(tmp2, tmp2, 16);
8180 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8181 tcg_gen_shri_i64(tmp64, tmp64, 16);
8182 tmp = tcg_temp_new_i32();
8183 tcg_gen_extrl_i64_i32(tmp, tmp64);
8184 tcg_temp_free_i64(tmp64);
8185 if ((sh & 2) == 0) {
8186 tmp2 = load_reg(s, rn);
8187 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8188 tcg_temp_free_i32(tmp2);
8190 store_reg(s, rd, tmp);
8193 tmp = load_reg(s, rm);
8194 tmp2 = load_reg(s, rs);
8195 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8196 tcg_temp_free_i32(tmp2);
8198 tmp64 = tcg_temp_new_i64();
8199 tcg_gen_ext_i32_i64(tmp64, tmp);
8200 tcg_temp_free_i32(tmp);
8201 gen_addq(s, tmp64, rn, rd);
8202 gen_storeq_reg(s, rn, rd, tmp64);
8203 tcg_temp_free_i64(tmp64);
8206 tmp2 = load_reg(s, rn);
8207 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8208 tcg_temp_free_i32(tmp2);
8210 store_reg(s, rd, tmp);
8217 } else if (((insn & 0x0e000000) == 0 &&
8218 (insn & 0x00000090) != 0x90) ||
8219 ((insn & 0x0e000000) == (1 << 25))) {
8220 int set_cc, logic_cc, shiftop;
8222 op1 = (insn >> 21) & 0xf;
8223 set_cc = (insn >> 20) & 1;
8224 logic_cc = table_logic_cc[op1] & set_cc;
8226 /* data processing instruction */
8227 if (insn & (1 << 25)) {
8228 /* immediate operand */
8230 shift = ((insn >> 8) & 0xf) * 2;
8232 val = (val >> shift) | (val << (32 - shift));
8234 tmp2 = tcg_temp_new_i32();
8235 tcg_gen_movi_i32(tmp2, val);
8236 if (logic_cc && shift) {
8237 gen_set_CF_bit31(tmp2);
8242 tmp2 = load_reg(s, rm);
8243 shiftop = (insn >> 5) & 3;
8244 if (!(insn & (1 << 4))) {
8245 shift = (insn >> 7) & 0x1f;
8246 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8248 rs = (insn >> 8) & 0xf;
8249 tmp = load_reg(s, rs);
8250 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8253 if (op1 != 0x0f && op1 != 0x0d) {
8254 rn = (insn >> 16) & 0xf;
8255 tmp = load_reg(s, rn);
8259 rd = (insn >> 12) & 0xf;
8262 tcg_gen_and_i32(tmp, tmp, tmp2);
8266 store_reg_bx(s, rd, tmp);
8269 tcg_gen_xor_i32(tmp, tmp, tmp2);
8273 store_reg_bx(s, rd, tmp);
8276 if (set_cc && rd == 15) {
8277 /* SUBS r15, ... is used for exception return. */
8281 gen_sub_CC(tmp, tmp, tmp2);
8282 gen_exception_return(s, tmp);
8285 gen_sub_CC(tmp, tmp, tmp2);
8287 tcg_gen_sub_i32(tmp, tmp, tmp2);
8289 store_reg_bx(s, rd, tmp);
8294 gen_sub_CC(tmp, tmp2, tmp);
8296 tcg_gen_sub_i32(tmp, tmp2, tmp);
8298 store_reg_bx(s, rd, tmp);
8302 gen_add_CC(tmp, tmp, tmp2);
8304 tcg_gen_add_i32(tmp, tmp, tmp2);
8306 store_reg_bx(s, rd, tmp);
8310 gen_adc_CC(tmp, tmp, tmp2);
8312 gen_add_carry(tmp, tmp, tmp2);
8314 store_reg_bx(s, rd, tmp);
8318 gen_sbc_CC(tmp, tmp, tmp2);
8320 gen_sub_carry(tmp, tmp, tmp2);
8322 store_reg_bx(s, rd, tmp);
8326 gen_sbc_CC(tmp, tmp2, tmp);
8328 gen_sub_carry(tmp, tmp2, tmp);
8330 store_reg_bx(s, rd, tmp);
8334 tcg_gen_and_i32(tmp, tmp, tmp2);
8337 tcg_temp_free_i32(tmp);
8341 tcg_gen_xor_i32(tmp, tmp, tmp2);
8344 tcg_temp_free_i32(tmp);
8348 gen_sub_CC(tmp, tmp, tmp2);
8350 tcg_temp_free_i32(tmp);
8354 gen_add_CC(tmp, tmp, tmp2);
8356 tcg_temp_free_i32(tmp);
8359 tcg_gen_or_i32(tmp, tmp, tmp2);
8363 store_reg_bx(s, rd, tmp);
8366 if (logic_cc && rd == 15) {
8367 /* MOVS r15, ... is used for exception return. */
8371 gen_exception_return(s, tmp2);
8376 store_reg_bx(s, rd, tmp2);
8380 tcg_gen_andc_i32(tmp, tmp, tmp2);
8384 store_reg_bx(s, rd, tmp);
8388 tcg_gen_not_i32(tmp2, tmp2);
8392 store_reg_bx(s, rd, tmp2);
8395 if (op1 != 0x0f && op1 != 0x0d) {
8396 tcg_temp_free_i32(tmp2);
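/* Note on the cleanup just above: MOV (op1 0xd) and MVN (0xf) neither
 * read Rn (hence the matching check before load_reg() earlier) nor leave
 * tmp2 alive, because tmp2 itself is what gets written back; the
 * comparison ops TST/TEQ/CMP/CMN (0x8-0xb) update only the flags and
 * free their result instead of storing it.
 */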
8399 /* other instructions */
8400 op1 = (insn >> 24) & 0xf;
8404 /* multiplies, extra load/stores */
8405 sh = (insn >> 5) & 3;
8408 rd = (insn >> 16) & 0xf;
8409 rn = (insn >> 12) & 0xf;
8410 rs = (insn >> 8) & 0xf;
8412 op1 = (insn >> 20) & 0xf;
8414 case 0: case 1: case 2: case 3: case 6:
8416 tmp = load_reg(s, rs);
8417 tmp2 = load_reg(s, rm);
8418 tcg_gen_mul_i32(tmp, tmp, tmp2);
8419 tcg_temp_free_i32(tmp2);
8420 if (insn & (1 << 22)) {
8421 /* Subtract (mls) */
8423 tmp2 = load_reg(s, rn);
8424 tcg_gen_sub_i32(tmp, tmp2, tmp);
8425 tcg_temp_free_i32(tmp2);
8426 } else if (insn & (1 << 21)) {
8428 tmp2 = load_reg(s, rn);
8429 tcg_gen_add_i32(tmp, tmp, tmp2);
8430 tcg_temp_free_i32(tmp2);
8432 if (insn & (1 << 20))
8434 store_reg(s, rd, tmp);
8437 /* 64 bit mul double accumulate (UMAAL) */
8439 tmp = load_reg(s, rs);
8440 tmp2 = load_reg(s, rm);
8441 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8442 gen_addq_lo(s, tmp64, rn);
8443 gen_addq_lo(s, tmp64, rd);
8444 gen_storeq_reg(s, rn, rd, tmp64);
8445 tcg_temp_free_i64(tmp64);
8447 case 8: case 9: case 10: case 11:
8448 case 12: case 13: case 14: case 15:
8449 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8450 tmp = load_reg(s, rs);
8451 tmp2 = load_reg(s, rm);
8452 if (insn & (1 << 22)) {
8453 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8455 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8457 if (insn & (1 << 21)) { /* mult accumulate */
8458 TCGv_i32 al = load_reg(s, rn);
8459 TCGv_i32 ah = load_reg(s, rd);
8460 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8461 tcg_temp_free_i32(al);
8462 tcg_temp_free_i32(ah);
8464 if (insn & (1 << 20)) {
8465 gen_logicq_cc(tmp, tmp2);
8467 store_reg(s, rn, tmp);
8468 store_reg(s, rd, tmp2);
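/* Sketch of the long multiply above: mulu2/muls2 leave the low half of
 * the 64-bit product in tmp and the high half in tmp2; the accumulate
 * forms then use add2 to add the existing RdLo:RdHi pair (al:ah) with
 * carry between the halves. Here rn (bits [15:12]) receives the low
 * word and rd (bits [19:16]) the high word.
 */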
8474 rn = (insn >> 16) & 0xf;
8475 rd = (insn >> 12) & 0xf;
8476 if (insn & (1 << 23)) {
8477 /* load/store exclusive */
8478 bool is_ld = extract32(insn, 20, 1);
8479 bool is_lasr = !extract32(insn, 8, 1);
8480 int op2 = (insn >> 8) & 3;
8481 op1 = (insn >> 21) & 0x3;
8484 case 0: /* lda/stl */
8490 case 1: /* reserved */
8492 case 2: /* ldaex/stlex */
8495 case 3: /* ldrex/strex */
8504 addr = tcg_temp_local_new_i32();
8505 load_reg_var(s, addr, rn);
8507 if (is_lasr && !is_ld) {
8508 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8513 tmp = tcg_temp_new_i32();
8516 gen_aa32_ld32u_iss(s, tmp, addr,
8521 gen_aa32_ld8u_iss(s, tmp, addr,
8526 gen_aa32_ld16u_iss(s, tmp, addr,
8533 store_reg(s, rd, tmp);
8536 tmp = load_reg(s, rm);
8539 gen_aa32_st32_iss(s, tmp, addr,
8544 gen_aa32_st8_iss(s, tmp, addr,
8549 gen_aa32_st16_iss(s, tmp, addr,
8556 tcg_temp_free_i32(tmp);
8561 gen_load_exclusive(s, rd, 15, addr, 2);
8563 case 1: /* ldrexd */
8564 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8566 case 2: /* ldrexb */
8567 gen_load_exclusive(s, rd, 15, addr, 0);
8569 case 3: /* ldrexh */
8570 gen_load_exclusive(s, rd, 15, addr, 1);
8579 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8581 case 1: /* strexd */
8582 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8584 case 2: /* strexb */
8585 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8587 case 3: /* strexh */
8588 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8594 tcg_temp_free_i32(addr);
8596 if (is_lasr && is_ld) {
8597 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
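/* Barrier placement for the acquire/release forms decoded above: a
 * store-release (STL/STLEX variants) is preceded by a TCG_BAR_STRL
 * barrier, while a load-acquire (LDA/LDAEX variants) is followed by a
 * TCG_BAR_LDAQ barrier, which is how the required ordering is expressed
 * to the TCG backend.
 */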
8599 } else if ((insn & 0x00300f00) == 0) {
8600 /* 0bcccc_0001_0x00_xxxx_xxxx_xxxx_0000_1001_xxxx - SWP, SWPB */
8605 TCGMemOp opc = s->be_data;
8609 if (insn & (1 << 22)) {
8612 opc |= MO_UL | MO_ALIGN;
8615 addr = load_reg(s, rn);
8616 taddr = gen_aa32_addr(s, addr, opc);
8617 tcg_temp_free_i32(addr);
8619 tmp = load_reg(s, rm);
8620 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8621 get_mem_index(s), opc);
8622 tcg_temp_free(taddr);
8623 store_reg(s, rd, tmp);
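/* SWP/SWPB is implemented as a single atomic exchange: the old memory
 * contents end up in tmp (and hence in Rd) while the value from Rm is
 * written to memory, so no explicit load/store pair is generated.
 */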
8630 bool load = insn & (1 << 20);
8631 bool wbit = insn & (1 << 21);
8632 bool pbit = insn & (1 << 24);
8633 bool doubleword = false;
8636 /* Misc load/store */
8637 rn = (insn >> 16) & 0xf;
8638 rd = (insn >> 12) & 0xf;
8640 /* ISS not valid if writeback */
8641 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8643 if (!load && (sh & 2)) {
8647 /* UNPREDICTABLE; we choose to UNDEF */
8650 load = (sh & 1) == 0;
8654 addr = load_reg(s, rn);
8656 gen_add_datah_offset(s, insn, 0, addr);
8663 tmp = load_reg(s, rd);
8664 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8665 tcg_temp_free_i32(tmp);
8666 tcg_gen_addi_i32(addr, addr, 4);
8667 tmp = load_reg(s, rd + 1);
8668 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8669 tcg_temp_free_i32(tmp);
8672 tmp = tcg_temp_new_i32();
8673 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8674 store_reg(s, rd, tmp);
8675 tcg_gen_addi_i32(addr, addr, 4);
8676 tmp = tcg_temp_new_i32();
8677 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8680 address_offset = -4;
8683 tmp = tcg_temp_new_i32();
8686 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
8690 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
8695 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
8701 tmp = load_reg(s, rd);
8702 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
8703 tcg_temp_free_i32(tmp);
8705 /* Perform base writeback before the loaded value to
8706 ensure correct behavior with overlapping index registers.
8707 ldrd with base writeback is undefined if the
8708 destination and index registers overlap. */
8710 gen_add_datah_offset(s, insn, address_offset, addr);
8711 store_reg(s, rn, addr);
8714 tcg_gen_addi_i32(addr, addr, address_offset);
8715 store_reg(s, rn, addr);
8717 tcg_temp_free_i32(addr);
8720 /* Complete the load. */
8721 store_reg(s, rd, tmp);
8730 if (insn & (1 << 4)) {
8732 /* Armv6 Media instructions. */
8734 rn = (insn >> 16) & 0xf;
8735 rd = (insn >> 12) & 0xf;
8736 rs = (insn >> 8) & 0xf;
8737 switch ((insn >> 23) & 3) {
8738 case 0: /* Parallel add/subtract. */
8739 op1 = (insn >> 20) & 7;
8740 tmp = load_reg(s, rn);
8741 tmp2 = load_reg(s, rm);
8742 sh = (insn >> 5) & 7;
8743 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8745 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8746 tcg_temp_free_i32(tmp2);
8747 store_reg(s, rd, tmp);
8750 if ((insn & 0x00700020) == 0) {
8751 /* Halfword pack. */
8752 tmp = load_reg(s, rn);
8753 tmp2 = load_reg(s, rm);
8754 shift = (insn >> 7) & 0x1f;
8755 if (insn & (1 << 6)) {
8760 tcg_gen_sari_i32(tmp2, tmp2, shift);
8761 tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
8764 tcg_gen_shli_i32(tmp2, tmp2, shift);
8765 tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
8767 tcg_temp_free_i32(tmp2);
8768 store_reg(s, rd, tmp);
8769 } else if ((insn & 0x00200020) == 0x00200000) {
8771 tmp = load_reg(s, rm);
8772 shift = (insn >> 7) & 0x1f;
8773 if (insn & (1 << 6)) {
8776 tcg_gen_sari_i32(tmp, tmp, shift);
8778 tcg_gen_shli_i32(tmp, tmp, shift);
8780 sh = (insn >> 16) & 0x1f;
8781 tmp2 = tcg_const_i32(sh);
8782 if (insn & (1 << 22))
8783 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8785 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8786 tcg_temp_free_i32(tmp2);
8787 store_reg(s, rd, tmp);
8788 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8790 tmp = load_reg(s, rm);
8791 sh = (insn >> 16) & 0x1f;
8792 tmp2 = tcg_const_i32(sh);
8793 if (insn & (1 << 22))
8794 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8796 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8797 tcg_temp_free_i32(tmp2);
8798 store_reg(s, rd, tmp);
8799 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8801 tmp = load_reg(s, rn);
8802 tmp2 = load_reg(s, rm);
8803 tmp3 = tcg_temp_new_i32();
8804 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8805 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8806 tcg_temp_free_i32(tmp3);
8807 tcg_temp_free_i32(tmp2);
8808 store_reg(s, rd, tmp);
8809 } else if ((insn & 0x000003e0) == 0x00000060) {
8810 tmp = load_reg(s, rm);
8811 shift = (insn >> 10) & 3;
8812 /* ??? In many cases it's not necessary to do a
8813 rotate; a shift is sufficient. */
8815 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8816 op1 = (insn >> 20) & 7;
8818 case 0: gen_sxtb16(tmp); break;
8819 case 2: gen_sxtb(tmp); break;
8820 case 3: gen_sxth(tmp); break;
8821 case 4: gen_uxtb16(tmp); break;
8822 case 6: gen_uxtb(tmp); break;
8823 case 7: gen_uxth(tmp); break;
8824 default: goto illegal_op;
8827 tmp2 = load_reg(s, rn);
8828 if ((op1 & 3) == 0) {
8829 gen_add16(tmp, tmp2);
8831 tcg_gen_add_i32(tmp, tmp, tmp2);
8832 tcg_temp_free_i32(tmp2);
8835 store_reg(s, rd, tmp);
8836 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8838 tmp = load_reg(s, rm);
8839 if (insn & (1 << 22)) {
8840 if (insn & (1 << 7)) {
8844 gen_helper_rbit(tmp, tmp);
8847 if (insn & (1 << 7))
8850 tcg_gen_bswap32_i32(tmp, tmp);
8852 store_reg(s, rd, tmp);
8857 case 2: /* Multiplies (Type 3). */
8858 switch ((insn >> 20) & 0x7) {
8860 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8861 /* op2 not 00x or 11x : UNDEF */
8864 /* Signed multiply most significant [accumulate].
8865 (SMMUL, SMMLA, SMMLS) */
8866 tmp = load_reg(s, rm);
8867 tmp2 = load_reg(s, rs);
8868 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8871 tmp = load_reg(s, rd);
8872 if (insn & (1 << 6)) {
8873 tmp64 = gen_subq_msw(tmp64, tmp);
8875 tmp64 = gen_addq_msw(tmp64, tmp);
8878 if (insn & (1 << 5)) {
8879 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8881 tcg_gen_shri_i64(tmp64, tmp64, 32);
8882 tmp = tcg_temp_new_i32();
8883 tcg_gen_extrl_i64_i32(tmp, tmp64);
8884 tcg_temp_free_i64(tmp64);
8885 store_reg(s, rn, tmp);
8889 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8890 if (insn & (1 << 7)) {
8893 tmp = load_reg(s, rm);
8894 tmp2 = load_reg(s, rs);
8895 if (insn & (1 << 5))
8896 gen_swap_half(tmp2);
8897 gen_smul_dual(tmp, tmp2);
8898 if (insn & (1 << 22)) {
8899 /* smlald, smlsld */
8902 tmp64 = tcg_temp_new_i64();
8903 tmp64_2 = tcg_temp_new_i64();
8904 tcg_gen_ext_i32_i64(tmp64, tmp);
8905 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8906 tcg_temp_free_i32(tmp);
8907 tcg_temp_free_i32(tmp2);
8908 if (insn & (1 << 6)) {
8909 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8911 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8913 tcg_temp_free_i64(tmp64_2);
8914 gen_addq(s, tmp64, rd, rn);
8915 gen_storeq_reg(s, rd, rn, tmp64);
8916 tcg_temp_free_i64(tmp64);
8918 /* smuad, smusd, smlad, smlsd */
8919 if (insn & (1 << 6)) {
8920 /* This subtraction cannot overflow. */
8921 tcg_gen_sub_i32(tmp, tmp, tmp2);
8923 /* This addition cannot overflow 32 bits;
8924 * however it may overflow when considered as a
8925 * signed operation, in which case we must set the Q flag. */
8928 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8930 tcg_temp_free_i32(tmp2);
8933 tmp2 = load_reg(s, rd);
8934 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8935 tcg_temp_free_i32(tmp2);
8937 store_reg(s, rn, tmp);
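/* Worked example of the dual multiply above: with source operands
 * 0x00020003 and 0x00040005, gen_smul_dual() forms 3*5 and 2*4, so
 * SMUAD produces 23 while SMUSD produces 7 (low product minus high
 * product). The optional halfword swap selected by bit 5 is applied to
 * tmp2 before the multiplies.
 */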
8943 if (!dc_isar_feature(arm_div, s)) {
8946 if (((insn >> 5) & 7) || (rd != 15)) {
8949 tmp = load_reg(s, rm);
8950 tmp2 = load_reg(s, rs);
8951 if (insn & (1 << 21)) {
8952 gen_helper_udiv(tmp, tmp, tmp2);
8954 gen_helper_sdiv(tmp, tmp, tmp2);
8956 tcg_temp_free_i32(tmp2);
8957 store_reg(s, rn, tmp);
8964 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8966 case 0: /* Unsigned sum of absolute differences. */
8968 tmp = load_reg(s, rm);
8969 tmp2 = load_reg(s, rs);
8970 gen_helper_usad8(tmp, tmp, tmp2);
8971 tcg_temp_free_i32(tmp2);
8973 tmp2 = load_reg(s, rd);
8974 tcg_gen_add_i32(tmp, tmp, tmp2);
8975 tcg_temp_free_i32(tmp2);
8977 store_reg(s, rn, tmp);
8979 case 0x20: case 0x24: case 0x28: case 0x2c:
8980 /* Bitfield insert/clear. */
8982 shift = (insn >> 7) & 0x1f;
8983 i = (insn >> 16) & 0x1f;
8985 /* UNPREDICTABLE; we choose to UNDEF */
8990 tmp = tcg_temp_new_i32();
8991 tcg_gen_movi_i32(tmp, 0);
8993 tmp = load_reg(s, rm);
8996 tmp2 = load_reg(s, rd);
8997 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
8998 tcg_temp_free_i32(tmp2);
9000 store_reg(s, rd, tmp);
9002 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9003 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9005 tmp = load_reg(s, rm);
9006 shift = (insn >> 7) & 0x1f;
9007 i = ((insn >> 16) & 0x1f) + 1;
9012 tcg_gen_extract_i32(tmp, tmp, shift, i);
9014 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9017 store_reg(s, rd, tmp);
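/* Field decode for the bitfield extracts above: lsb is insn[11:7] and
 * width-1 is insn[20:16], so e.g. UBFX r0, r1, #8, #4 becomes
 * tcg_gen_extract_i32(tmp, tmp, 8, 4), taking four bits starting at bit
 * 8 and zero-extending them; SBFX uses the sign-extending sextract
 * variant instead.
 */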
9027 /* Check for undefined extension instructions
9028 * per the ARM Bible, i.e.:
9029 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9031 sh = (0xf << 20) | (0xf << 4);
9032 if (op1 == 0x7 && ((insn & sh) == sh))
9036 /* load/store byte/word */
9037 rn = (insn >> 16) & 0xf;
9038 rd = (insn >> 12) & 0xf;
9039 tmp2 = load_reg(s, rn);
9040 if ((insn & 0x01200000) == 0x00200000) {
9042 i = get_a32_user_mem_index(s);
9044 i = get_mem_index(s);
9046 if (insn & (1 << 24))
9047 gen_add_data_offset(s, insn, tmp2);
9048 if (insn & (1 << 20)) {
9050 tmp = tcg_temp_new_i32();
9051 if (insn & (1 << 22)) {
9052 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9054 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9058 tmp = load_reg(s, rd);
9059 if (insn & (1 << 22)) {
9060 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
9062 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
9064 tcg_temp_free_i32(tmp);
9066 if (!(insn & (1 << 24))) {
9067 gen_add_data_offset(s, insn, tmp2);
9068 store_reg(s, rn, tmp2);
9069 } else if (insn & (1 << 21)) {
9070 store_reg(s, rn, tmp2);
9072 tcg_temp_free_i32(tmp2);
9074 if (insn & (1 << 20)) {
9075 /* Complete the load. */
9076 store_reg_from_load(s, rd, tmp);
9082 int j, n, loaded_base;
9083 bool exc_return = false;
9084 bool is_load = extract32(insn, 20, 1);
9086 TCGv_i32 loaded_var;
9087 /* load/store multiple words */
9088 /* XXX: store correct base if write back */
9089 if (insn & (1 << 22)) {
9090 /* LDM (user), LDM (exception return) and STM (user) */
9092 goto illegal_op; /* only usable in supervisor mode */
9094 if (is_load && extract32(insn, 15, 1)) {
9100 rn = (insn >> 16) & 0xf;
9101 addr = load_reg(s, rn);
9103 /* compute total size */
9107 for (i = 0; i < 16; i++) {
9108 if (insn & (1 << i))
9111 /* XXX: test invalid n == 0 case ? */
9112 if (insn & (1 << 23)) {
9113 if (insn & (1 << 24)) {
9115 tcg_gen_addi_i32(addr, addr, 4);
9117 /* post increment */
9120 if (insn & (1 << 24)) {
9122 tcg_gen_addi_i32(addr, addr, -(n * 4));
9124 /* post decrement */
9126 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9130 for (i = 0; i < 16; i++) {
9131 if (insn & (1 << i)) {
9134 tmp = tcg_temp_new_i32();
9135 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9137 tmp2 = tcg_const_i32(i);
9138 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9139 tcg_temp_free_i32(tmp2);
9140 tcg_temp_free_i32(tmp);
9141 } else if (i == rn) {
9144 } else if (i == 15 && exc_return) {
9145 store_pc_exc_ret(s, tmp);
9147 store_reg_from_load(s, i, tmp);
9152 tmp = tcg_temp_new_i32();
9153 tcg_gen_movi_i32(tmp, read_pc(s));
9155 tmp = tcg_temp_new_i32();
9156 tmp2 = tcg_const_i32(i);
9157 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9158 tcg_temp_free_i32(tmp2);
9160 tmp = load_reg(s, i);
9162 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9163 tcg_temp_free_i32(tmp);
9166 /* no need to add after the last transfer */
9168 tcg_gen_addi_i32(addr, addr, 4);
9171 if (insn & (1 << 21)) {
9173 if (insn & (1 << 23)) {
9174 if (insn & (1 << 24)) {
9177 /* post increment */
9178 tcg_gen_addi_i32(addr, addr, 4);
9181 if (insn & (1 << 24)) {
9184 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9186 /* post decrement */
9187 tcg_gen_addi_i32(addr, addr, -(n * 4));
9190 store_reg(s, rn, addr);
9192 tcg_temp_free_i32(addr);
9195 store_reg(s, rn, loaded_var);
9198 /* Restore CPSR from SPSR. */
9199 tmp = load_cpu_field(spsr);
9200 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9203 gen_helper_cpsr_write_eret(cpu_env, tmp);
9204 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9207 tcg_temp_free_i32(tmp);
9208 /* Must exit loop to check unmasked IRQs */
9209 s->base.is_jmp = DISAS_EXIT;
9218 /* branch (and link) */
9219 if (insn & (1 << 24)) {
9220 tmp = tcg_temp_new_i32();
9221 tcg_gen_movi_i32(tmp, s->base.pc_next);
9222 store_reg(s, 14, tmp);
9224 offset = sextract32(insn << 2, 0, 26);
9225 gen_jmp(s, read_pc(s) + offset);
9231 if (((insn >> 8) & 0xe) == 10) {
9233 if (disas_vfp_insn(s, insn)) {
9236 } else if (disas_coproc_insn(s, insn)) {
9243 gen_set_pc_im(s, s->base.pc_next);
9244 s->svc_imm = extract32(insn, 0, 24);
9245 s->base.is_jmp = DISAS_SWI;
9249 unallocated_encoding(s);
9255 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
9258 * Return true if this is a 16-bit instruction. We must be precise
9259 * about this (matching the decode).
9261 if ((insn >> 11) < 0x1d) {
9262 /* Definitely a 16-bit instruction */
9266 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9267 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9268 * end up actually treating this as two 16-bit insns, though,
9269 * if it's half of a bl/blx pair that might span a page boundary.
9271 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
9272 arm_dc_feature(s, ARM_FEATURE_M)) {
9273 /* Thumb2 cores (including all M profile ones) always treat
9274 * 32-bit insns as 32-bit.
9279 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
9280 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9281 * is not on the next page; we merge this into a 32-bit
9286 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9287 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9288 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9289 * -- handle as a single 16-bit insn
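 * For example, a first halfword of 0x4770 (BX LR) has its top five bits
 * equal to 0b01000 < 0b11101 and so is always 16 bits, while 0xf7ff
 * (a BL/BLX prefix, top bits 0b11110) is the first half of a 32-bit
 * insn on Thumb-2 cores but is treated as a single 16-bit insn by a
 * Thumb-1 core when it sits at the end of a page, as described above.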
9294 /* Return true if this is a Thumb-2 logical op. */
9296 thumb2_logic_op(int op)
9301 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9302 then set condition code flags based on the result of the operation.
9303 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9304 to the high bit of T1.
9305 Returns zero if the opcode is valid. */
9308 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9309 TCGv_i32 t0, TCGv_i32 t1)
9316 tcg_gen_and_i32(t0, t0, t1);
9320 tcg_gen_andc_i32(t0, t0, t1);
9324 tcg_gen_or_i32(t0, t0, t1);
9328 tcg_gen_orc_i32(t0, t0, t1);
9332 tcg_gen_xor_i32(t0, t0, t1);
9337 gen_add_CC(t0, t0, t1);
9339 tcg_gen_add_i32(t0, t0, t1);
9343 gen_adc_CC(t0, t0, t1);
9349 gen_sbc_CC(t0, t0, t1);
9351 gen_sub_carry(t0, t0, t1);
9356 gen_sub_CC(t0, t0, t1);
9358 tcg_gen_sub_i32(t0, t0, t1);
9362 gen_sub_CC(t0, t1, t0);
9364 tcg_gen_sub_i32(t0, t1, t0);
9366 default: /* 5, 6, 7, 9, 12, 15. */
9372 gen_set_CF_bit31(t1);
9377 /* Translate a 32-bit thumb instruction. */
9378 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9380 uint32_t imm, shift, offset;
9381 uint32_t rd, rn, rm, rs;
9393 * ARMv6-M supports a limited subset of Thumb2 instructions.
9394 * Other Thumb1 architectures allow only 32-bit
9395 * combined BL/BLX prefix and suffix.
9397 if (arm_dc_feature(s, ARM_FEATURE_M) &&
9398 !arm_dc_feature(s, ARM_FEATURE_V7)) {
9401 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
9402 0xf3b08040 /* dsb */,
9403 0xf3b08050 /* dmb */,
9404 0xf3b08060 /* isb */,
9405 0xf3e08000 /* mrs */,
9406 0xf000d000 /* bl */};
9407 static const uint32_t armv6m_mask[] = {0xffe0d000,
9414 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
9415 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
9423 } else if ((insn & 0xf800e800) != 0xf000e800) {
9427 rn = (insn >> 16) & 0xf;
9428 rs = (insn >> 12) & 0xf;
9429 rd = (insn >> 8) & 0xf;
9431 switch ((insn >> 25) & 0xf) {
9432 case 0: case 1: case 2: case 3:
9433 /* 16-bit instructions. Should never happen. */
9436 if (insn & (1 << 22)) {
9437 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9438 * - load/store doubleword, load/store exclusive, ldacq/strel,
9441 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9442 arm_dc_feature(s, ARM_FEATURE_V8)) {
9443 /* 0b1110_1001_0111_1111_1110_1001_0111_1111 - SG (v8M only)
9445 * The bulk of the behaviour for this instruction is implemented
9446 * in v7m_handle_execute_nsc(), which deals with the insn when
9447 * it is executed by a CPU in non-secure state from memory
9448 * which is Secure & NonSecure-Callable.
9449 * Here we only need to handle the remaining cases:
9450 * * in NS memory (including the "security extension not
9451 * implemented" case) : NOP
9452 * * in S memory but CPU already secure (clear IT bits)
9453 * We know that the attribute for the memory this insn is
9454 * in must match the current CPU state, because otherwise
9455 * get_phys_addr_pmsav8 would have generated an exception.
9457 if (s->v8m_secure) {
9458 /* Like the IT insn, we don't need to generate any code */
9459 s->condexec_cond = 0;
9460 s->condexec_mask = 0;
9462 } else if (insn & 0x01200000) {
9463 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9464 * - load/store dual (post-indexed)
9465 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9466 * - load/store dual (literal and immediate)
9467 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9468 * - load/store dual (pre-indexed)
9470 bool wback = extract32(insn, 21, 1);
9472 if (rn == 15 && (insn & (1 << 21))) {
9477 addr = add_reg_for_lit(s, rn, 0);
9478 offset = (insn & 0xff) * 4;
9479 if ((insn & (1 << 23)) == 0) {
9483 if (s->v8m_stackcheck && rn == 13 && wback) {
9485 * Here 'addr' is the current SP; if offset is +ve we're
9486 * moving SP up, else down. It is UNKNOWN whether the limit
9487 * check triggers when SP starts below the limit and ends
9488 * up above it; check whichever of the current and final
9489 * SP is lower, so QEMU will trigger in that situation.
9491 if ((int32_t)offset < 0) {
9492 TCGv_i32 newsp = tcg_temp_new_i32();
9494 tcg_gen_addi_i32(newsp, addr, offset);
9495 gen_helper_v8m_stackcheck(cpu_env, newsp);
9496 tcg_temp_free_i32(newsp);
9498 gen_helper_v8m_stackcheck(cpu_env, addr);
9502 if (insn & (1 << 24)) {
9503 tcg_gen_addi_i32(addr, addr, offset);
9506 if (insn & (1 << 20)) {
9508 tmp = tcg_temp_new_i32();
9509 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9510 store_reg(s, rs, tmp);
9511 tcg_gen_addi_i32(addr, addr, 4);
9512 tmp = tcg_temp_new_i32();
9513 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9514 store_reg(s, rd, tmp);
9517 tmp = load_reg(s, rs);
9518 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9519 tcg_temp_free_i32(tmp);
9520 tcg_gen_addi_i32(addr, addr, 4);
9521 tmp = load_reg(s, rd);
9522 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9523 tcg_temp_free_i32(tmp);
9526 /* Base writeback. */
9527 tcg_gen_addi_i32(addr, addr, offset - 4);
9528 store_reg(s, rn, addr);
9530 tcg_temp_free_i32(addr);
9532 } else if ((insn & (1 << 23)) == 0) {
9533 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9534 * - load/store exclusive word
9538 if (!(insn & (1 << 20)) &&
9539 arm_dc_feature(s, ARM_FEATURE_M) &&
9540 arm_dc_feature(s, ARM_FEATURE_V8)) {
9541 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9544 bool alt = insn & (1 << 7);
9545 TCGv_i32 addr, op, ttresp;
9547 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
9548 /* we UNDEF for these UNPREDICTABLE cases */
9552 if (alt && !s->v8m_secure) {
9556 addr = load_reg(s, rn);
9557 op = tcg_const_i32(extract32(insn, 6, 2));
9558 ttresp = tcg_temp_new_i32();
9559 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
9560 tcg_temp_free_i32(addr);
9561 tcg_temp_free_i32(op);
9562 store_reg(s, rd, ttresp);
9567 addr = tcg_temp_local_new_i32();
9568 load_reg_var(s, addr, rn);
9569 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9570 if (insn & (1 << 20)) {
9571 gen_load_exclusive(s, rs, 15, addr, 2);
9573 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9575 tcg_temp_free_i32(addr);
9576 } else if ((insn & (7 << 5)) == 0) {
9578 addr = load_reg(s, rn);
9579 tmp = load_reg(s, rm);
9580 tcg_gen_add_i32(addr, addr, tmp);
9581 if (insn & (1 << 4)) {
9583 tcg_gen_add_i32(addr, addr, tmp);
9584 tcg_temp_free_i32(tmp);
9585 tmp = tcg_temp_new_i32();
9586 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9588 tcg_temp_free_i32(tmp);
9589 tmp = tcg_temp_new_i32();
9590 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9592 tcg_temp_free_i32(addr);
9593 tcg_gen_shli_i32(tmp, tmp, 1);
9594 tcg_gen_addi_i32(tmp, tmp, read_pc(s));
9595 store_reg(s, 15, tmp);
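/* Table Branch sketch: the byte (TBB) or halfword (TBH) loaded from
 * Rn + Rm (or Rn + 2*Rm for TBH) is doubled and added to the PC, i.e.
 * the table holds forward branch offsets in units of two bytes, and
 * writing the result to r15 performs the branch.
 */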
9597 bool is_lasr = false;
9598 bool is_ld = extract32(insn, 20, 1);
9599 int op2 = (insn >> 6) & 0x3;
9600 op = (insn >> 4) & 0x3;
9605 /* Load/store exclusive byte/halfword/doubleword */
9612 /* Load-acquire/store-release */
9618 /* Load-acquire/store-release exclusive */
9624 if (is_lasr && !is_ld) {
9625 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9628 addr = tcg_temp_local_new_i32();
9629 load_reg_var(s, addr, rn);
9632 tmp = tcg_temp_new_i32();
9635 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9639 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9643 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9649 store_reg(s, rs, tmp);
9651 tmp = load_reg(s, rs);
9654 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9658 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9662 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9668 tcg_temp_free_i32(tmp);
9671 gen_load_exclusive(s, rs, rd, addr, op);
9673 gen_store_exclusive(s, rm, rs, rd, addr, op);
9675 tcg_temp_free_i32(addr);
9677 if (is_lasr && is_ld) {
9678 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9682 /* Load/store multiple, RFE, SRS. */
9683 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9684 /* RFE, SRS: not available in user mode or on M profile */
9685 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9688 if (insn & (1 << 20)) {
9690 addr = load_reg(s, rn);
9691 if ((insn & (1 << 24)) == 0)
9692 tcg_gen_addi_i32(addr, addr, -8);
9693 /* Load PC into tmp and CPSR into tmp2. */
9694 tmp = tcg_temp_new_i32();
9695 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9696 tcg_gen_addi_i32(addr, addr, 4);
9697 tmp2 = tcg_temp_new_i32();
9698 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9699 if (insn & (1 << 21)) {
9700 /* Base writeback. */
9701 if (insn & (1 << 24)) {
9702 tcg_gen_addi_i32(addr, addr, 4);
9704 tcg_gen_addi_i32(addr, addr, -4);
9706 store_reg(s, rn, addr);
9708 tcg_temp_free_i32(addr);
9710 gen_rfe(s, tmp, tmp2);
9713 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9717 int i, loaded_base = 0;
9718 TCGv_i32 loaded_var;
9719 bool wback = extract32(insn, 21, 1);
9720 /* Load/store multiple. */
9721 addr = load_reg(s, rn);
9723 for (i = 0; i < 16; i++) {
9724 if (insn & (1 << i))
9728 if (insn & (1 << 24)) {
9729 tcg_gen_addi_i32(addr, addr, -offset);
9732 if (s->v8m_stackcheck && rn == 13 && wback) {
9734 * If the writeback is incrementing SP rather than
9735 * decrementing it, and the initial SP is below the
9736 * stack limit but the final written-back SP would
9737 * be above, then we must not perform any memory
9738 * accesses, but it is IMPDEF whether we generate
9739 * an exception. We choose to do so in this case.
9740 * At this point 'addr' is the lowest address, so
9741 * either the original SP (if incrementing) or our
9742 * final SP (if decrementing), so that's what we check.
9744 gen_helper_v8m_stackcheck(cpu_env, addr);
9748 for (i = 0; i < 16; i++) {
9749 if ((insn & (1 << i)) == 0)
9751 if (insn & (1 << 20)) {
9753 tmp = tcg_temp_new_i32();
9754 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9756 gen_bx_excret(s, tmp);
9757 } else if (i == rn) {
9761 store_reg(s, i, tmp);
9765 tmp = load_reg(s, i);
9766 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9767 tcg_temp_free_i32(tmp);
9769 tcg_gen_addi_i32(addr, addr, 4);
9772 store_reg(s, rn, loaded_var);
9775 /* Base register writeback. */
9776 if (insn & (1 << 24)) {
9777 tcg_gen_addi_i32(addr, addr, -offset);
9779 /* Fault if writeback register is in register list. */
9780 if (insn & (1 << rn))
9782 store_reg(s, rn, addr);
9784 tcg_temp_free_i32(addr);
9791 op = (insn >> 21) & 0xf;
9793 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9796 /* Halfword pack. */
9797 tmp = load_reg(s, rn);
9798 tmp2 = load_reg(s, rm);
9799 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9800 if (insn & (1 << 5)) {
9805 tcg_gen_sari_i32(tmp2, tmp2, shift);
9806 tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
9809 tcg_gen_shli_i32(tmp2, tmp2, shift);
9810 tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
9812 tcg_temp_free_i32(tmp2);
9813 store_reg(s, rd, tmp);
9815 /* Data processing register constant shift. */
9817 tmp = tcg_temp_new_i32();
9818 tcg_gen_movi_i32(tmp, 0);
9820 tmp = load_reg(s, rn);
9822 tmp2 = load_reg(s, rm);
9824 shiftop = (insn >> 4) & 3;
9825 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9826 conds = (insn & (1 << 20)) != 0;
9827 logic_cc = (conds && thumb2_logic_op(op));
9828 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9829 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9831 tcg_temp_free_i32(tmp2);
9833 ((op == 2 && rn == 15) ||
9834 (op == 8 && rn == 13) ||
9835 (op == 13 && rn == 13))) {
9836 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
9837 store_sp_checked(s, tmp);
9838 } else if (rd != 15) {
9839 store_reg(s, rd, tmp);
9841 tcg_temp_free_i32(tmp);
9845 case 13: /* Misc data processing. */
9846 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9847 if (op < 4 && (insn & 0xf000) != 0xf000)
9850 case 0: /* Register controlled shift. */
9851 tmp = load_reg(s, rn);
9852 tmp2 = load_reg(s, rm);
9853 if ((insn & 0x70) != 0)
9856 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
9857 * - MOV, MOVS (register-shifted register), flagsetting
9859 op = (insn >> 21) & 3;
9860 logic_cc = (insn & (1 << 20)) != 0;
9861 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9864 store_reg(s, rd, tmp);
9866 case 1: /* Sign/zero extend. */
9867 op = (insn >> 20) & 7;
9869 case 0: /* SXTAH, SXTH */
9870 case 1: /* UXTAH, UXTH */
9871 case 4: /* SXTAB, SXTB */
9872 case 5: /* UXTAB, UXTB */
9874 case 2: /* SXTAB16, SXTB16 */
9875 case 3: /* UXTAB16, UXTB16 */
9876 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9884 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9888 tmp = load_reg(s, rm);
9889 shift = (insn >> 4) & 3;
9890 /* ??? In many cases it's not necessary to do a
9891 rotate; a shift is sufficient. */
9893 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9894 op = (insn >> 20) & 7;
9896 case 0: gen_sxth(tmp); break;
9897 case 1: gen_uxth(tmp); break;
9898 case 2: gen_sxtb16(tmp); break;
9899 case 3: gen_uxtb16(tmp); break;
9900 case 4: gen_sxtb(tmp); break;
9901 case 5: gen_uxtb(tmp); break;
9903 g_assert_not_reached();
9906 tmp2 = load_reg(s, rn);
9907 if ((op >> 1) == 1) {
9908 gen_add16(tmp, tmp2);
9910 tcg_gen_add_i32(tmp, tmp, tmp2);
9911 tcg_temp_free_i32(tmp2);
9914 store_reg(s, rd, tmp);
9916 case 2: /* SIMD add/subtract. */
9917 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9920 op = (insn >> 20) & 7;
9921 shift = (insn >> 4) & 7;
9922 if ((op & 3) == 3 || (shift & 3) == 3)
9924 tmp = load_reg(s, rn);
9925 tmp2 = load_reg(s, rm);
9926 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9927 tcg_temp_free_i32(tmp2);
9928 store_reg(s, rd, tmp);
9930 case 3: /* Other data processing. */
9931 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9933 /* Saturating add/subtract. */
9934 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9937 tmp = load_reg(s, rn);
9938 tmp2 = load_reg(s, rm);
9940 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
9942 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9944 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9945 tcg_temp_free_i32(tmp2);
9948 case 0x0a: /* rbit */
9949 case 0x08: /* rev */
9950 case 0x09: /* rev16 */
9951 case 0x0b: /* revsh */
9952 case 0x18: /* clz */
9954 case 0x10: /* sel */
9955 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9959 case 0x20: /* crc32/crc32c */
9965 if (!dc_isar_feature(aa32_crc32, s)) {
9972 tmp = load_reg(s, rn);
9974 case 0x0a: /* rbit */
9975 gen_helper_rbit(tmp, tmp);
9977 case 0x08: /* rev */
9978 tcg_gen_bswap32_i32(tmp, tmp);
9980 case 0x09: /* rev16 */
9983 case 0x0b: /* revsh */
9986 case 0x10: /* sel */
9987 tmp2 = load_reg(s, rm);
9988 tmp3 = tcg_temp_new_i32();
9989 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9990 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9991 tcg_temp_free_i32(tmp3);
9992 tcg_temp_free_i32(tmp2);
9994 case 0x18: /* clz */
9995 tcg_gen_clzi_i32(tmp, tmp, 32);
10005 uint32_t sz = op & 0x3;
10006 uint32_t c = op & 0x8;
10008 tmp2 = load_reg(s, rm);
10010 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10011 } else if (sz == 1) {
10012 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10014 tmp3 = tcg_const_i32(1 << sz);
10016 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10018 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10020 tcg_temp_free_i32(tmp2);
10021 tcg_temp_free_i32(tmp3);
10025 g_assert_not_reached();
10028 store_reg(s, rd, tmp);
10030 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10031 switch ((insn >> 20) & 7) {
10032 case 0: /* 32 x 32 -> 32 */
10033 case 7: /* Unsigned sum of absolute differences. */
10035 case 1: /* 16 x 16 -> 32 */
10036 case 2: /* Dual multiply add. */
10037 case 3: /* 32 * 16 -> 32msb */
10038 case 4: /* Dual multiply subtract. */
10039 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10040 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10045 op = (insn >> 4) & 0xf;
10046 tmp = load_reg(s, rn);
10047 tmp2 = load_reg(s, rm);
10048 switch ((insn >> 20) & 7) {
10049 case 0: /* 32 x 32 -> 32 */
10050 tcg_gen_mul_i32(tmp, tmp, tmp2);
10051 tcg_temp_free_i32(tmp2);
10053 tmp2 = load_reg(s, rs);
10055 tcg_gen_sub_i32(tmp, tmp2, tmp);
10057 tcg_gen_add_i32(tmp, tmp, tmp2);
10058 tcg_temp_free_i32(tmp2);
10061 case 1: /* 16 x 16 -> 32 */
10062 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10063 tcg_temp_free_i32(tmp2);
10065 tmp2 = load_reg(s, rs);
10066 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10067 tcg_temp_free_i32(tmp2);
10070 case 2: /* Dual multiply add. */
10071 case 4: /* Dual multiply subtract. */
10073 gen_swap_half(tmp2);
10074 gen_smul_dual(tmp, tmp2);
10075 if (insn & (1 << 22)) {
10076 /* This subtraction cannot overflow. */
10077 tcg_gen_sub_i32(tmp, tmp, tmp2);
10079 /* This addition cannot overflow 32 bits;
10080 * however it may overflow when considered as a signed
10081 * operation, in which case we must set the Q flag.
10083 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10085 tcg_temp_free_i32(tmp2);
10088 tmp2 = load_reg(s, rs);
10089 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10090 tcg_temp_free_i32(tmp2);
10093 case 3: /* 32 * 16 -> 32msb */
10095 tcg_gen_sari_i32(tmp2, tmp2, 16);
10098 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10099 tcg_gen_shri_i64(tmp64, tmp64, 16);
10100 tmp = tcg_temp_new_i32();
10101 tcg_gen_extrl_i64_i32(tmp, tmp64);
10102 tcg_temp_free_i64(tmp64);
10105 tmp2 = load_reg(s, rs);
10106 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10107 tcg_temp_free_i32(tmp2);
10110 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10111 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10113 tmp = load_reg(s, rs);
10114 if (insn & (1 << 20)) {
10115 tmp64 = gen_addq_msw(tmp64, tmp);
10117 tmp64 = gen_subq_msw(tmp64, tmp);
10120 if (insn & (1 << 4)) {
10121 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10123 tcg_gen_shri_i64(tmp64, tmp64, 32);
10124 tmp = tcg_temp_new_i32();
10125 tcg_gen_extrl_i64_i32(tmp, tmp64);
10126 tcg_temp_free_i64(tmp64);
10128 case 7: /* Unsigned sum of absolute differences. */
10129 gen_helper_usad8(tmp, tmp, tmp2);
10130 tcg_temp_free_i32(tmp2);
10132 tmp2 = load_reg(s, rs);
10133 tcg_gen_add_i32(tmp, tmp, tmp2);
10134 tcg_temp_free_i32(tmp2);
10138 store_reg(s, rd, tmp);
10140 case 6: case 7: /* 64-bit multiply, Divide. */
10141 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10142 tmp = load_reg(s, rn);
10143 tmp2 = load_reg(s, rm);
10144 if ((op & 0x50) == 0x10) {
10146 if (!dc_isar_feature(thumb_div, s)) {
10150 gen_helper_udiv(tmp, tmp, tmp2);
10152 gen_helper_sdiv(tmp, tmp, tmp2);
10153 tcg_temp_free_i32(tmp2);
10154 store_reg(s, rd, tmp);
10155 } else if ((op & 0xe) == 0xc) {
10156 /* Dual multiply accumulate long. */
10157 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10158 tcg_temp_free_i32(tmp);
10159 tcg_temp_free_i32(tmp2);
10163 gen_swap_half(tmp2);
10164 gen_smul_dual(tmp, tmp2);
10166 tcg_gen_sub_i32(tmp, tmp, tmp2);
10168 tcg_gen_add_i32(tmp, tmp, tmp2);
10170 tcg_temp_free_i32(tmp2);
10172 tmp64 = tcg_temp_new_i64();
10173 tcg_gen_ext_i32_i64(tmp64, tmp);
10174 tcg_temp_free_i32(tmp);
10175 gen_addq(s, tmp64, rs, rd);
10176 gen_storeq_reg(s, rs, rd, tmp64);
10177 tcg_temp_free_i64(tmp64);
10180 /* Unsigned 64-bit multiply */
10181 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10185 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10186 tcg_temp_free_i32(tmp2);
10187 tcg_temp_free_i32(tmp);
10190 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10191 tcg_temp_free_i32(tmp2);
10192 tmp64 = tcg_temp_new_i64();
10193 tcg_gen_ext_i32_i64(tmp64, tmp);
10194 tcg_temp_free_i32(tmp);
10196 /* Signed 64-bit multiply */
10197 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10202 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10203 tcg_temp_free_i64(tmp64);
10206 gen_addq_lo(s, tmp64, rs);
10207 gen_addq_lo(s, tmp64, rd);
10208 } else if (op & 0x40) {
10209 /* 64-bit accumulate. */
10210 gen_addq(s, tmp64, rs, rd);
10212 gen_storeq_reg(s, rs, rd, tmp64);
10213 tcg_temp_free_i64(tmp64);
10218 case 6: case 7: case 14: case 15:
10220 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10221 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10222 if (extract32(insn, 24, 2) == 3) {
10223 goto illegal_op; /* op0 = 0b11 : unallocated */
10227 * Decode VLLDM and VLSTM first: these are nonstandard because:
10228 * * if there is no FPU then these insns must NOP in
10229 * Secure state and UNDEF in Nonsecure state
10230 * * if there is an FPU then these insns do not have
10231 * the usual behaviour that disas_vfp_insn() provides of
10232 * being controlled by CPACR/NSACR enable bits or the
10233 * lazy-stacking logic.
10235 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10236 (insn & 0xffa00f00) == 0xec200a00) {
10237 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10239 * We choose to UNDEF if the RAZ bits are non-zero.
10241 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10245 if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
10246 TCGv_i32 fptr = load_reg(s, rn);
10248 if (extract32(insn, 20, 1)) {
10249 gen_helper_v7m_vlldm(cpu_env, fptr);
10251 gen_helper_v7m_vlstm(cpu_env, fptr);
10253 tcg_temp_free_i32(fptr);
10255 /* End the TB, because we have updated FP control bits */
10256 s->base.is_jmp = DISAS_UPDATE;
10260 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
10261 ((insn >> 8) & 0xe) == 10) {
10262 /* FP, and the CPU supports it */
10263 if (disas_vfp_insn(s, insn)) {
10269 /* All other insns: NOCP */
10270 gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
10271 default_exception_el(s));
10274 if ((insn & 0xfe000a00) == 0xfc000800
10275 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10276 /* The Thumb2 and ARM encodings are identical. */
10277 if (disas_neon_insn_3same_ext(s, insn)) {
10280 } else if ((insn & 0xff000a00) == 0xfe000800
10281 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10282 /* The Thumb2 and ARM encodings are identical. */
10283 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10286 } else if (((insn >> 24) & 3) == 3) {
10287 /* Translate into the equivalent ARM encoding. */
10288 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
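/* The bit shuffle above maps the Thumb-2 Neon data-processing encodings
 * (first byte 0xef or 0xff) onto the equivalent ARM unconditional
 * encodings (0xf2 or 0xf3): the old bit 28 is moved down to bit 24 and
 * bit 28 is then forced to 1, so the same decoder can be reused.
 */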
10289 if (disas_neon_data_insn(s, insn)) {
10292 } else if (((insn >> 8) & 0xe) == 10) {
10293 if (disas_vfp_insn(s, insn)) {
10297 if (insn & (1 << 28))
10299 if (disas_coproc_insn(s, insn)) {
10304 case 8: case 9: case 10: case 11:
10305 if (insn & (1 << 15)) {
10306 /* Branches, misc control. */
10307 if (insn & 0x5000) {
10308 /* Unconditional branch. */
10309 /* signextend(hw1[10:0]) -> offset[:12]. */
10310 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10311 /* hw1[10:0] -> offset[11:1]. */
10312 offset |= (insn & 0x7ff) << 1;
10313 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10314 offset[24:22] already have the same value because of the
10315 sign extension above. */
10316 offset ^= ((~insn) & (1 << 13)) << 10;
10317 offset ^= ((~insn) & (1 << 11)) << 11;
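/* Architecturally (T4 B / T1 BL) the offset is
 * SignExtend(S:I1:I2:imm10:imm11:'0') with I1 = NOT(J1 EOR S) and
 * I2 = NOT(J2 EOR S).  The arithmetic shift above already copied
 * S (hw1 bit 10) into offset[31:22], so XORing offset[23] with ~J1
 * (hw2 bit 13) and offset[22] with ~J2 (hw2 bit 11) yields exactly
 * I1:I2, as noted in the comment above.
 */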
10319 if (insn & (1 << 14)) {
10320 /* Branch and link. */
10321 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
10324 offset += read_pc(s);
10325 if (insn & (1 << 12)) {
10327 gen_jmp(s, offset);
10330 offset &= ~(uint32_t)2;
10331 /* thumb2 bx, no need to check */
10332 gen_bx_im(s, offset);
10334 } else if (((insn >> 23) & 7) == 7) {
10336 if (insn & (1 << 13))
10339 if (insn & (1 << 26)) {
10340 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10343 if (!(insn & (1 << 20))) {
10344 /* Hypervisor call (v7) */
10345 int imm16 = extract32(insn, 16, 4) << 12
10346 | extract32(insn, 0, 12);
10353 /* Secure monitor call (v6+) */
10361 op = (insn >> 20) & 7;
10363 case 0: /* msr cpsr. */
10364 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10365 tmp = load_reg(s, rn);
10366 /* the constant is the mask and SYSm fields */
10367 addr = tcg_const_i32(insn & 0xfff);
10368 gen_helper_v7m_msr(cpu_env, addr, tmp);
10369 tcg_temp_free_i32(addr);
10370 tcg_temp_free_i32(tmp);
10375 case 1: /* msr spsr. */
10376 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10380 if (extract32(insn, 5, 1)) {
10382 int sysm = extract32(insn, 8, 4) |
10383 (extract32(insn, 4, 1) << 4);
10386 gen_msr_banked(s, r, sysm, rm);
10390 /* MSR (for PSRs) */
10391 tmp = load_reg(s, rn);
10393 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10397 case 2: /* cps, nop-hint. */
10398 if (((insn >> 8) & 7) == 0) {
10399 gen_nop_hint(s, insn & 0xff);
10401 /* Implemented as NOP in user mode. */
10406 if (insn & (1 << 10)) {
10407 if (insn & (1 << 7))
10409 if (insn & (1 << 6))
10411 if (insn & (1 << 5))
10413 if (insn & (1 << 9))
10414 imm = CPSR_A | CPSR_I | CPSR_F;
10416 if (insn & (1 << 8)) {
10418 imm |= (insn & 0x1f);
10421 gen_set_psr_im(s, offset, 0, imm);
10424 case 3: /* Special control operations. */
10425 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
10426 !arm_dc_feature(s, ARM_FEATURE_M)) {
10429 op = (insn >> 4) & 0xf;
10431 case 2: /* clrex */
10436 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10439 /* We need to break the TB after this insn
10440 * to execute self-modifying code correctly
10441 * and also to take any pending interrupts
10444 gen_goto_tb(s, 0, s->base.pc_next);
10447 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
10451 * TODO: There is no speculation barrier opcode
10452 * for TCG; MB and end the TB instead.
10454 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10455 gen_goto_tb(s, 0, s->base.pc_next);
10462 /* Trivial implementation equivalent to bx.
10463 * This instruction doesn't exist at all for M-profile.
10465 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10468 tmp = load_reg(s, rn);
10471 case 5: /* Exception return. */
10475 if (rn != 14 || rd != 15) {
10478 if (s->current_el == 2) {
10479 /* ERET from Hyp uses ELR_Hyp, not LR */
10483 tmp = load_cpu_field(elr_el[2]);
10485 tmp = load_reg(s, rn);
10486 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10488 gen_exception_return(s, tmp);
10491 if (extract32(insn, 5, 1) &&
10492 !arm_dc_feature(s, ARM_FEATURE_M)) {
10494 int sysm = extract32(insn, 16, 4) |
10495 (extract32(insn, 4, 1) << 4);
10497 gen_mrs_banked(s, 0, sysm, rd);
10501 if (extract32(insn, 16, 4) != 0xf) {
10504 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10505 extract32(insn, 0, 8) != 0) {
10510 tmp = tcg_temp_new_i32();
10511 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10512 addr = tcg_const_i32(insn & 0xff);
10513 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10514 tcg_temp_free_i32(addr);
10516 gen_helper_cpsr_read(tmp, cpu_env);
10518 store_reg(s, rd, tmp);
10521 if (extract32(insn, 5, 1) &&
10522 !arm_dc_feature(s, ARM_FEATURE_M)) {
10524 int sysm = extract32(insn, 16, 4) |
10525 (extract32(insn, 4, 1) << 4);
10527 gen_mrs_banked(s, 1, sysm, rd);
10532 /* Not accessible in user mode. */
10533 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10537 if (extract32(insn, 16, 4) != 0xf ||
10538 extract32(insn, 0, 8) != 0) {
10542 tmp = load_cpu_field(spsr);
10543 store_reg(s, rd, tmp);
10548 /* Conditional branch. */
10549 op = (insn >> 22) & 0xf;
10550 /* Generate a conditional jump to next instruction. */
10551 arm_skip_unless(s, op);
10553 /* offset[11:1] = insn[10:0] */
10554 offset = (insn & 0x7ff) << 1;
10555 /* offset[17:12] = insn[21:16]. */
10556 offset |= (insn & 0x003f0000) >> 4;
10557 /* offset[31:20] = insn[26]. */
10558 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10559 /* offset[18] = insn[13]. */
10560 offset |= (insn & (1 << 13)) << 5;
10561 /* offset[19] = insn[11]. */
10562 offset |= (insn & (1 << 11)) << 8;
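/* i.e. imm32 = SignExtend(S:J2:J1:imm6:imm11:'0') for the T3
 * conditional branch encoding, with S = insn[26], J2 = insn[11]
 * and J1 = insn[13].
 */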
10564 /* jump to the offset */
10565 gen_jmp(s, read_pc(s) + offset);
10569 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
10570 * - Data-processing (modified immediate, plain binary immediate)
10572 if (insn & (1 << 25)) {
10574 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
10575 * - Data-processing (plain binary immediate)
10577 if (insn & (1 << 24)) {
10578 if (insn & (1 << 20))
10580 /* Bitfield/Saturate. */
10581 op = (insn >> 21) & 7;
10583 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
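/* Gather the split imm3:imm2 field (insn[14:12] and insn[7:6]) into
 * one 5-bit value: the lsb for the bitfield ops, or the shift
 * amount for the saturate ops below.
 */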
10585 tmp = tcg_temp_new_i32();
10586 tcg_gen_movi_i32(tmp, 0);
10588 tmp = load_reg(s, rn);
10591 case 2: /* Signed bitfield extract. */
10593 if (shift + imm > 32)
10596 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10599 case 6: /* Unsigned bitfield extract. */
10601 if (shift + imm > 32)
10604 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10607 case 3: /* Bitfield insert/clear. */
10610 imm = imm + 1 - shift;
10612 tmp2 = load_reg(s, rd);
10613 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10614 tcg_temp_free_i32(tmp2);
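/* tcg_gen_deposit_i32 writes the low 'imm' (i.e. width) bits of the
 * source in tmp -- zero when Rn is r15, which is the BFC case --
 * into the old Rd value at bit position 'shift'.
 */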
10619 default: /* Saturate. */
10622 tcg_gen_sari_i32(tmp, tmp, shift);
10624 tcg_gen_shli_i32(tmp, tmp, shift);
10626 tmp2 = tcg_const_i32(imm);
10629 if ((op & 1) && shift == 0) {
10630 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10631 tcg_temp_free_i32(tmp);
10632 tcg_temp_free_i32(tmp2);
10635 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10637 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10641 if ((op & 1) && shift == 0) {
10642 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10643 tcg_temp_free_i32(tmp);
10644 tcg_temp_free_i32(tmp2);
10647 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10649 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10652 tcg_temp_free_i32(tmp2);
10655 store_reg(s, rd, tmp);
10657 imm = ((insn & 0x04000000) >> 15)
10658 | ((insn & 0x7000) >> 4) | (insn & 0xff);
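/* imm is the 12-bit i:imm3:imm8 field: i (insn[26]) lands in bit 11,
 * imm3 (insn[14:12]) in bits [10:8] and imm8 in bits [7:0].
 */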
10659 if (insn & (1 << 22)) {
10660 /* 16-bit immediate. */
10661 imm |= (insn >> 4) & 0xf000;
10662 if (insn & (1 << 23)) {
10664 tmp = load_reg(s, rd);
10665 tcg_gen_ext16u_i32(tmp, tmp);
10666 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10669 tmp = tcg_temp_new_i32();
10670 tcg_gen_movi_i32(tmp, imm);
10672 store_reg(s, rd, tmp);
10674 /* Add/sub 12-bit immediate. */
10675 if (insn & (1 << 23)) {
10678 tmp = add_reg_for_lit(s, rn, imm);
10679 if (rn == 13 && rd == 13) {
10680 /* ADD SP, SP, imm or SUB SP, SP, imm */
10681 store_sp_checked(s, tmp);
10683 store_reg(s, rd, tmp);
10689 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
10690 * - Data-processing (modified immediate)
10692 int shifter_out = 0;
10693 /* modified 12-bit immediate. */
10694 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10695 imm = (insn & 0xff);
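/* Expand the modified immediate (ThumbExpandImm): 'shift' holds
 * i:imm3.  Values 0-3 replicate imm8 as XY, 00XY00XY, XY00XY00 or
 * XYXYXYXY; larger values rotate '1':imm8<6:0> right by
 * i:imm3:imm8<7>.  For example i:imm3 = 0b0101 with imm8 = 0x60
 * gives a rotation of 10 and expands to 0x38000000.
 */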
10698 /* Nothing to do. */
10700 case 1: /* 00XY00XY */
10703 case 2: /* XY00XY00 */
10707 case 3: /* XYXYXYXY */
10711 default: /* Rotated constant. */
10712 shift = (shift << 1) | (imm >> 7);
10714 imm = imm << (32 - shift);
10718 tmp2 = tcg_temp_new_i32();
10719 tcg_gen_movi_i32(tmp2, imm);
10720 rn = (insn >> 16) & 0xf;
10722 tmp = tcg_temp_new_i32();
10723 tcg_gen_movi_i32(tmp, 0);
10725 tmp = load_reg(s, rn);
10727 op = (insn >> 21) & 0xf;
10728 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10729 shifter_out, tmp, tmp2))
10731 tcg_temp_free_i32(tmp2);
10732 rd = (insn >> 8) & 0xf;
10733 if (rd == 13 && rn == 13
10734 && (op == 8 || op == 13)) {
10735 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
10736 store_sp_checked(s, tmp);
10737 } else if (rd != 15) {
10738 store_reg(s, rd, tmp);
10740 tcg_temp_free_i32(tmp);
10745 case 12: /* Load/store single data item. */
10752 if ((insn & 0x01100000) == 0x01000000) {
10753 if (disas_neon_ls_insn(s, insn)) {
10758 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10760 if (!(insn & (1 << 20))) {
10764 /* Byte or halfword load space with dest == r15 : memory hints.
10765 * Catch them early so we don't emit pointless addressing code.
10766 * This space is a mix of:
10767 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10768 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10770 * unallocated hints, which must be treated as NOPs
10771 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10772 * which is easiest for the decoding logic
10773 * Some space which must UNDEF
10775 int op1 = (insn >> 23) & 3;
10776 int op2 = (insn >> 6) & 0x3f;
10781 /* UNPREDICTABLE, unallocated hint or
10782 * PLD/PLDW/PLI (literal)
10787 return; /* PLD/PLDW/PLI or unallocated hint */
10789 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10790 return; /* PLD/PLDW/PLI or unallocated hint */
10792 /* UNDEF space, or an UNPREDICTABLE */
10796 memidx = get_mem_index(s);
10797 imm = insn & 0xfff;
10798 if (insn & (1 << 23)) {
10799 /* PC relative or Positive offset. */
10800 addr = add_reg_for_lit(s, rn, imm);
10801 } else if (rn == 15) {
10802 /* PC relative with negative offset. */
10803 addr = add_reg_for_lit(s, rn, -imm);
10805 addr = load_reg(s, rn);
10807 switch ((insn >> 8) & 0xf) {
10808 case 0x0: /* Shifted Register. */
10809 shift = (insn >> 4) & 0xf;
10811 tcg_temp_free_i32(addr);
10814 tmp = load_reg(s, rm);
10816 tcg_gen_shli_i32(tmp, tmp, shift);
10818 tcg_gen_add_i32(addr, addr, tmp);
10819 tcg_temp_free_i32(tmp);
10821 case 0xc: /* Negative offset. */
10822 tcg_gen_addi_i32(addr, addr, -imm);
10824 case 0xe: /* User privilege. */
10825 tcg_gen_addi_i32(addr, addr, imm);
10826 memidx = get_a32_user_mem_index(s);
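/* The 0xe form is the unprivileged (T-suffixed) load/store; using
 * the user-mode MMU index makes the access get permission-checked
 * as if it were issued from EL0.
 */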
10828 case 0x9: /* Post-decrement. */
10830 /* Fall through. */
10831 case 0xb: /* Post-increment. */
10835 case 0xd: /* Pre-decrement. */
10837 /* Fall through. */
10838 case 0xf: /* Pre-increment. */
10842 tcg_temp_free_i32(addr);
10847 issinfo = writeback ? ISSInvalid : rs;
10849 if (s->v8m_stackcheck && rn == 13 && writeback) {
10851 * Stackcheck. Here we know 'addr' is the current SP;
10852 * if imm is +ve we're moving SP up, else down. It is
10853 * UNKNOWN whether the limit check triggers when SP starts
10854 * below the limit and ends up above it; we chose to do so.
10856 if ((int32_t)imm < 0) {
10857 TCGv_i32 newsp = tcg_temp_new_i32();
10859 tcg_gen_addi_i32(newsp, addr, imm);
10860 gen_helper_v8m_stackcheck(cpu_env, newsp);
10861 tcg_temp_free_i32(newsp);
10863 gen_helper_v8m_stackcheck(cpu_env, addr);
10867 if (writeback && !postinc) {
10868 tcg_gen_addi_i32(addr, addr, imm);
10871 if (insn & (1 << 20)) {
10873 tmp = tcg_temp_new_i32();
10876 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
10879 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
10882 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
10885 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
10888 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
10891 tcg_temp_free_i32(tmp);
10892 tcg_temp_free_i32(addr);
10896 gen_bx_excret(s, tmp);
10898 store_reg(s, rs, tmp);
10902 tmp = load_reg(s, rs);
10905 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
10908 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
10911 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
10914 tcg_temp_free_i32(tmp);
10915 tcg_temp_free_i32(addr);
10918 tcg_temp_free_i32(tmp);
10921 tcg_gen_addi_i32(addr, addr, imm);
10923 store_reg(s, rn, addr);
10925 tcg_temp_free_i32(addr);
10934 unallocated_encoding(s);
10937 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
10939 uint32_t val, op, rm, rn, rd, shift, cond;
10946 switch (insn >> 12) {
10950 op = (insn >> 11) & 3;
10953 * 0b0001_1xxx_xxxx_xxxx
10954 * - Add, subtract (three low registers)
10955 * - Add, subtract (two low registers and immediate)
10957 rn = (insn >> 3) & 7;
10958 tmp = load_reg(s, rn);
10959 if (insn & (1 << 10)) {
10961 tmp2 = tcg_temp_new_i32();
10962 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10965 rm = (insn >> 6) & 7;
10966 tmp2 = load_reg(s, rm);
10968 if (insn & (1 << 9)) {
10969 if (s->condexec_mask)
10970 tcg_gen_sub_i32(tmp, tmp, tmp2);
10972 gen_sub_CC(tmp, tmp, tmp2);
10974 if (s->condexec_mask)
10975 tcg_gen_add_i32(tmp, tmp, tmp2);
10977 gen_add_CC(tmp, tmp, tmp2);
10979 tcg_temp_free_i32(tmp2);
10980 store_reg(s, rd, tmp);
10982 /* shift immediate */
10983 rm = (insn >> 3) & 7;
10984 shift = (insn >> 6) & 0x1f;
10985 tmp = load_reg(s, rm);
10986 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10987 if (!s->condexec_mask)
10989 store_reg(s, rd, tmp);
10994 * 0b001x_xxxx_xxxx_xxxx
10995 * - Add, subtract, compare, move (one low register and immediate)
10997 op = (insn >> 11) & 3;
10998 rd = (insn >> 8) & 0x7;
10999 if (op == 0) { /* mov */
11000 tmp = tcg_temp_new_i32();
11001 tcg_gen_movi_i32(tmp, insn & 0xff);
11002 if (!s->condexec_mask)
11004 store_reg(s, rd, tmp);
11006 tmp = load_reg(s, rd);
11007 tmp2 = tcg_temp_new_i32();
11008 tcg_gen_movi_i32(tmp2, insn & 0xff);
11011 gen_sub_CC(tmp, tmp, tmp2);
11012 tcg_temp_free_i32(tmp);
11013 tcg_temp_free_i32(tmp2);
11016 if (s->condexec_mask)
11017 tcg_gen_add_i32(tmp, tmp, tmp2);
11019 gen_add_CC(tmp, tmp, tmp2);
11020 tcg_temp_free_i32(tmp2);
11021 store_reg(s, rd, tmp);
11024 if (s->condexec_mask)
11025 tcg_gen_sub_i32(tmp, tmp, tmp2);
11027 gen_sub_CC(tmp, tmp, tmp2);
11028 tcg_temp_free_i32(tmp2);
11029 store_reg(s, rd, tmp);
11035 if (insn & (1 << 11)) {
11036 rd = (insn >> 8) & 7;
11037 /* load pc-relative. Bit 1 of PC is ignored. */
11038 addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
11039 tmp = tcg_temp_new_i32();
11040 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11042 tcg_temp_free_i32(addr);
11043 store_reg(s, rd, tmp);
11046 if (insn & (1 << 10)) {
11047 /* 0b0100_01xx_xxxx_xxxx
11048 * - data processing extended, branch and exchange
11050 rd = (insn & 7) | ((insn >> 4) & 8);
11051 rm = (insn >> 3) & 0xf;
11052 op = (insn >> 8) & 3;
11055 tmp = load_reg(s, rd);
11056 tmp2 = load_reg(s, rm);
11057 tcg_gen_add_i32(tmp, tmp, tmp2);
11058 tcg_temp_free_i32(tmp2);
11060 /* ADD SP, SP, reg */
11061 store_sp_checked(s, tmp);
11063 store_reg(s, rd, tmp);
11067 tmp = load_reg(s, rd);
11068 tmp2 = load_reg(s, rm);
11069 gen_sub_CC(tmp, tmp, tmp2);
11070 tcg_temp_free_i32(tmp2);
11071 tcg_temp_free_i32(tmp);
11073 case 2: /* mov/cpy */
11074 tmp = load_reg(s, rm);
11077 store_sp_checked(s, tmp);
11079 store_reg(s, rd, tmp);
11084 /* 0b0100_0111_xxxx_xxxx
11085 * - branch [and link] exchange thumb register
11087 bool link = insn & (1 << 7);
11096 /* BXNS/BLXNS: only exists for v8M with the
11097 * security extensions, and always UNDEF if NonSecure.
11098 * We don't implement these in the user-only mode
11099 * either (in theory you can use them from Secure User
11100 * mode but they are too tied in to system emulation.)
11102 if (!s->v8m_secure || IS_USER_ONLY) {
11113 tmp = load_reg(s, rm);
11115 val = (uint32_t)s->base.pc_next | 1;
11116 tmp2 = tcg_temp_new_i32();
11117 tcg_gen_movi_i32(tmp2, val);
11118 store_reg(s, 14, tmp2);
11121 /* Only BX works as exception-return, not BLX */
11122 gen_bx_excret(s, tmp);
11131 * 0b0100_00xx_xxxx_xxxx
11132 * - Data-processing (two low registers)
11135 rm = (insn >> 3) & 7;
11136 op = (insn >> 6) & 0xf;
11137 if (op == 2 || op == 3 || op == 4 || op == 7) {
11138 /* the shift/rotate ops want the operands backwards */
11147 if (op == 9) { /* neg */
11148 tmp = tcg_temp_new_i32();
11149 tcg_gen_movi_i32(tmp, 0);
11150 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11151 tmp = load_reg(s, rd);
11156 tmp2 = load_reg(s, rm);
11158 case 0x0: /* and */
11159 tcg_gen_and_i32(tmp, tmp, tmp2);
11160 if (!s->condexec_mask)
11163 case 0x1: /* eor */
11164 tcg_gen_xor_i32(tmp, tmp, tmp2);
11165 if (!s->condexec_mask)
11168 case 0x2: /* lsl */
11169 if (s->condexec_mask) {
11170 gen_shl(tmp2, tmp2, tmp);
11172 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11173 gen_logic_CC(tmp2);
11176 case 0x3: /* lsr */
11177 if (s->condexec_mask) {
11178 gen_shr(tmp2, tmp2, tmp);
11180 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11181 gen_logic_CC(tmp2);
11184 case 0x4: /* asr */
11185 if (s->condexec_mask) {
11186 gen_sar(tmp2, tmp2, tmp);
11188 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11189 gen_logic_CC(tmp2);
11192 case 0x5: /* adc */
11193 if (s->condexec_mask) {
11194 gen_adc(tmp, tmp2);
11196 gen_adc_CC(tmp, tmp, tmp2);
11199 case 0x6: /* sbc */
11200 if (s->condexec_mask) {
11201 gen_sub_carry(tmp, tmp, tmp2);
11203 gen_sbc_CC(tmp, tmp, tmp2);
11206 case 0x7: /* ror */
11207 if (s->condexec_mask) {
11208 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11209 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11211 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11212 gen_logic_CC(tmp2);
11215 case 0x8: /* tst */
11216 tcg_gen_and_i32(tmp, tmp, tmp2);
11220 case 0x9: /* neg */
11221 if (s->condexec_mask)
11222 tcg_gen_neg_i32(tmp, tmp2);
11224 gen_sub_CC(tmp, tmp, tmp2);
11226 case 0xa: /* cmp */
11227 gen_sub_CC(tmp, tmp, tmp2);
11230 case 0xb: /* cmn */
11231 gen_add_CC(tmp, tmp, tmp2);
11234 case 0xc: /* orr */
11235 tcg_gen_or_i32(tmp, tmp, tmp2);
11236 if (!s->condexec_mask)
11239 case 0xd: /* mul */
11240 tcg_gen_mul_i32(tmp, tmp, tmp2);
11241 if (!s->condexec_mask)
11244 case 0xe: /* bic */
11245 tcg_gen_andc_i32(tmp, tmp, tmp2);
11246 if (!s->condexec_mask)
11249 case 0xf: /* mvn */
11250 tcg_gen_not_i32(tmp2, tmp2);
11251 if (!s->condexec_mask)
11252 gen_logic_CC(tmp2);
11259 store_reg(s, rm, tmp2);
11261 tcg_temp_free_i32(tmp);
11263 store_reg(s, rd, tmp);
11264 tcg_temp_free_i32(tmp2);
11267 tcg_temp_free_i32(tmp);
11268 tcg_temp_free_i32(tmp2);
11273 /* load/store register offset. */
11275 rn = (insn >> 3) & 7;
11276 rm = (insn >> 6) & 7;
11277 op = (insn >> 9) & 7;
11278 addr = load_reg(s, rn);
11279 tmp = load_reg(s, rm);
11280 tcg_gen_add_i32(addr, addr, tmp);
11281 tcg_temp_free_i32(tmp);
11283 if (op < 3) { /* store */
11284 tmp = load_reg(s, rd);
11286 tmp = tcg_temp_new_i32();
11291 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11294 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11297 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11299 case 3: /* ldrsb */
11300 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11303 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11306 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11309 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11311 case 7: /* ldrsh */
11312 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11315 if (op >= 3) { /* load */
11316 store_reg(s, rd, tmp);
11318 tcg_temp_free_i32(tmp);
11320 tcg_temp_free_i32(addr);
11324 /* load/store word immediate offset */
11326 rn = (insn >> 3) & 7;
11327 addr = load_reg(s, rn);
11328 val = (insn >> 4) & 0x7c;
11329 tcg_gen_addi_i32(addr, addr, val);
11331 if (insn & (1 << 11)) {
11333 tmp = tcg_temp_new_i32();
11334 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11335 store_reg(s, rd, tmp);
11338 tmp = load_reg(s, rd);
11339 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11340 tcg_temp_free_i32(tmp);
11342 tcg_temp_free_i32(addr);
11346 /* load/store byte immediate offset */
11348 rn = (insn >> 3) & 7;
11349 addr = load_reg(s, rn);
11350 val = (insn >> 6) & 0x1f;
11351 tcg_gen_addi_i32(addr, addr, val);
11353 if (insn & (1 << 11)) {
11355 tmp = tcg_temp_new_i32();
11356 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11357 store_reg(s, rd, tmp);
11360 tmp = load_reg(s, rd);
11361 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11362 tcg_temp_free_i32(tmp);
11364 tcg_temp_free_i32(addr);
11368 /* load/store halfword immediate offset */
11370 rn = (insn >> 3) & 7;
11371 addr = load_reg(s, rn);
11372 val = (insn >> 5) & 0x3e;
11373 tcg_gen_addi_i32(addr, addr, val);
11375 if (insn & (1 << 11)) {
11377 tmp = tcg_temp_new_i32();
11378 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11379 store_reg(s, rd, tmp);
11382 tmp = load_reg(s, rd);
11383 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11384 tcg_temp_free_i32(tmp);
11386 tcg_temp_free_i32(addr);
11390 /* load/store from stack */
11391 rd = (insn >> 8) & 7;
11392 addr = load_reg(s, 13);
11393 val = (insn & 0xff) * 4;
11394 tcg_gen_addi_i32(addr, addr, val);
11396 if (insn & (1 << 11)) {
11398 tmp = tcg_temp_new_i32();
11399 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11400 store_reg(s, rd, tmp);
11403 tmp = load_reg(s, rd);
11404 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11405 tcg_temp_free_i32(tmp);
11407 tcg_temp_free_i32(addr);
11412 * 0b1010_xxxx_xxxx_xxxx
11413 * - Add PC/SP (immediate)
11415 rd = (insn >> 8) & 7;
11416 val = (insn & 0xff) * 4;
11417 tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
11418 store_reg(s, rd, tmp);
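/* For the r15 form (ADR) add_reg_for_lit should use the word-aligned
 * PC, matching the architectural Align(PC, 4) base; for r13 this is
 * a plain ADD of the scaled immediate to SP.
 */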
11423 op = (insn >> 8) & 0xf;
11427 * 0b1011_0000_xxxx_xxxx
11428 * - ADD (SP plus immediate)
11429 * - SUB (SP minus immediate)
11431 tmp = load_reg(s, 13);
11432 val = (insn & 0x7f) * 4;
11433 if (insn & (1 << 7))
11434 val = -(int32_t)val;
11435 tcg_gen_addi_i32(tmp, tmp, val);
11436 store_sp_checked(s, tmp);
11439 case 2: /* sign/zero extend. */
11442 rm = (insn >> 3) & 7;
11443 tmp = load_reg(s, rm);
11444 switch ((insn >> 6) & 3) {
11445 case 0: gen_sxth(tmp); break;
11446 case 1: gen_sxtb(tmp); break;
11447 case 2: gen_uxth(tmp); break;
11448 case 3: gen_uxtb(tmp); break;
11450 store_reg(s, rd, tmp);
11452 case 4: case 5: case 0xc: case 0xd:
11454 * 0b1011_x10x_xxxx_xxxx
11457 addr = load_reg(s, 13);
11458 if (insn & (1 << 8))
11462 for (i = 0; i < 8; i++) {
11463 if (insn & (1 << i))
11466 if ((insn & (1 << 11)) == 0) {
11467 tcg_gen_addi_i32(addr, addr, -offset);
11470 if (s->v8m_stackcheck) {
11472 * Here 'addr' is the lower of "old SP" and "new SP";
11473 * if this is a pop that starts below the limit and ends
11474 * above it, it is UNKNOWN whether the limit check triggers;
11475 * we choose to trigger.
11477 gen_helper_v8m_stackcheck(cpu_env, addr);
11480 for (i = 0; i < 8; i++) {
11481 if (insn & (1 << i)) {
11482 if (insn & (1 << 11)) {
11484 tmp = tcg_temp_new_i32();
11485 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11486 store_reg(s, i, tmp);
11489 tmp = load_reg(s, i);
11490 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11491 tcg_temp_free_i32(tmp);
11493 /* advance to the next address. */
11494 tcg_gen_addi_i32(addr, addr, 4);
11498 if (insn & (1 << 8)) {
11499 if (insn & (1 << 11)) {
11501 tmp = tcg_temp_new_i32();
11502 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11503 /* don't set the pc until the rest of the instruction
11507 tmp = load_reg(s, 14);
11508 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11509 tcg_temp_free_i32(tmp);
11511 tcg_gen_addi_i32(addr, addr, 4);
11513 if ((insn & (1 << 11)) == 0) {
11514 tcg_gen_addi_i32(addr, addr, -offset);
11516 /* write back the new stack pointer */
11517 store_reg(s, 13, addr);
11518 /* set the new PC value */
11519 if ((insn & 0x0900) == 0x0900) {
11520 store_reg_from_load(s, 15, tmp);
11524 case 1: case 3: case 9: case 11: /* czb */
11526 tmp = load_reg(s, rm);
11527 arm_gen_condlabel(s);
11528 if (insn & (1 << 11))
11529 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
11531 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
11532 tcg_temp_free_i32(tmp);
11533 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11534 gen_jmp(s, read_pc(s) + offset);
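/* The CBZ/CBNZ offset is ZeroExtend(i:imm5:'0'): insn[9] supplies
 * bit 6 and insn[7:3] bits [5:1], so only forward branches of
 * 0..126 bytes are encodable.
 */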
11537 case 15: /* IT, nop-hint. */
11538 if ((insn & 0xf) == 0) {
11539 gen_nop_hint(s, (insn >> 4) & 0xf);
11545 * Combinations of firstcond and mask which set up an 0b1111
11546 * condition are UNPREDICTABLE; we take the CONSTRAINED
11547 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
11548 * i.e. both meaning "execute always".
11550 s->condexec_cond = (insn >> 4) & 0xe;
11551 s->condexec_mask = insn & 0x1f;
11552 /* No actual code generated for this insn, just setup state. */
11555 case 0xe: /* bkpt */
11557 int imm8 = extract32(insn, 0, 8);
11559 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
11563 case 0xa: /* rev, and hlt */
11565 int op1 = extract32(insn, 6, 2);
11569 int imm6 = extract32(insn, 0, 6);
11575 /* Otherwise this is rev */
11577 rn = (insn >> 3) & 0x7;
11579 tmp = load_reg(s, rn);
11581 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
11582 case 1: gen_rev16(tmp); break;
11583 case 3: gen_revsh(tmp); break;
11585 g_assert_not_reached();
11587 store_reg(s, rd, tmp);
11592 switch ((insn >> 5) & 7) {
11596 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11597 gen_helper_setend(cpu_env);
11598 s->base.is_jmp = DISAS_UPDATE;
11607 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11608 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11611 addr = tcg_const_i32(19);
11612 gen_helper_v7m_msr(cpu_env, addr, tmp);
11613 tcg_temp_free_i32(addr);
11617 addr = tcg_const_i32(16);
11618 gen_helper_v7m_msr(cpu_env, addr, tmp);
11619 tcg_temp_free_i32(addr);
11621 tcg_temp_free_i32(tmp);
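/* On M-profile CPSIE/CPSID is modelled as MSR writes via the
 * v7m_msr helper: SYSm 19 is FAULTMASK and SYSm 16 is PRIMASK,
 * and tmp holds the value written (1 for CPSID, 0 for CPSIE).
 */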
11624 if (insn & (1 << 4)) {
11625 shift = CPSR_A | CPSR_I | CPSR_F;
11629 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
11644 /* load/store multiple */
11645 TCGv_i32 loaded_var = NULL;
11646 rn = (insn >> 8) & 0x7;
11647 addr = load_reg(s, rn);
11648 for (i = 0; i < 8; i++) {
11649 if (insn & (1 << i)) {
11650 if (insn & (1 << 11)) {
11652 tmp = tcg_temp_new_i32();
11653 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11657 store_reg(s, i, tmp);
11661 tmp = load_reg(s, i);
11662 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11663 tcg_temp_free_i32(tmp);
11665 /* advance to the next address */
11666 tcg_gen_addi_i32(addr, addr, 4);
11669 if ((insn & (1 << rn)) == 0) {
11670 /* base reg not in list: base register writeback */
11671 store_reg(s, rn, addr);
11673 /* base reg in list: if load, complete it now */
11674 if (insn & (1 << 11)) {
11675 store_reg(s, rn, loaded_var);
11677 tcg_temp_free_i32(addr);
11682 /* conditional branch or swi */
11683 cond = (insn >> 8) & 0xf;
11689 gen_set_pc_im(s, s->base.pc_next);
11690 s->svc_imm = extract32(insn, 0, 8);
11691 s->base.is_jmp = DISAS_SWI;
11694 /* generate a conditional jump to next instruction */
11695 arm_skip_unless(s, cond);
11697 /* jump to the offset */
11699 offset = ((int32_t)insn << 24) >> 24;
11700 val += offset << 1;
11705 if (insn & (1 << 11)) {
11706 /* thumb_insn_is_16bit() ensures we can't get here for
11707 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11708 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11710 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11712 offset = ((insn & 0x7ff) << 1);
11713 tmp = load_reg(s, 14);
11714 tcg_gen_addi_i32(tmp, tmp, offset);
11715 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
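/* The BLX suffix forces a word-aligned target because the callee
 * runs in ARM state.
 */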
11717 tmp2 = tcg_temp_new_i32();
11718 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
11719 store_reg(s, 14, tmp2);
11723 /* unconditional branch */
11725 offset = ((int32_t)insn << 21) >> 21;
11726 val += offset << 1;
11731 /* thumb_insn_is_16bit() ensures we can't get here for
11732 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11734 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11736 if (insn & (1 << 11)) {
11737 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11738 offset = ((insn & 0x7ff) << 1) | 1;
11739 tmp = load_reg(s, 14);
11740 tcg_gen_addi_i32(tmp, tmp, offset);
11742 tmp2 = tcg_temp_new_i32();
11743 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
11744 store_reg(s, 14, tmp2);
11747 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11748 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11750 tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
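/* First half of a Thumb1 split BL/BLX: LR temporarily holds
 * PC + SignExtend(imm11) << 12; the suffix halfword (decoded above)
 * adds its own imm11 << 1 and only then writes the real return
 * address into LR.
 */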
11757 unallocated_encoding(s);
11760 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11762 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
11763 * (False positives are OK, false negatives are not.)
11764 * We know this is a Thumb insn, and our caller ensures we are
11765 * only called if dc->base.pc_next is less than 4 bytes from the page
11766 * boundary, so we cross the page if the first 16 bits indicate
11767 * that this is a 32 bit insn.
11769 uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
11771 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
11774 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
11776 DisasContext *dc = container_of(dcbase, DisasContext, base);
11777 CPUARMState *env = cs->env_ptr;
11778 ARMCPU *cpu = env_archcpu(env);
11779 uint32_t tb_flags = dc->base.tb->flags;
11780 uint32_t condexec, core_mmu_idx;
11782 dc->isar = &cpu->isar;
11786 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11787 * there is no secure EL1, so we route exceptions to EL3.
11789 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11790 !arm_el_is_aa64(env, 3);
11791 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
11792 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
11793 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
11794 condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
11795 dc->condexec_mask = (condexec & 0xf) << 1;
11796 dc->condexec_cond = condexec >> 4;
11797 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
11798 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
11799 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11800 #if !defined(CONFIG_USER_ONLY)
11801 dc->user = (dc->current_el == 0);
11803 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
11804 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
11805 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
11806 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
11807 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
11808 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
11809 dc->vec_stride = 0;
11811 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
11814 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
11815 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11816 regime_is_secure(env, dc->mmu_idx);
11817 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
11818 dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
11819 dc->v7m_new_fp_ctxt_needed =
11820 FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
11821 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
11822 dc->cp_regs = cpu->cp_regs;
11823 dc->features = env->features;
11825 /* Single step state. The code-generation logic here is:
11827 * generate code with no special handling for single-stepping (except
11828 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11829 * this happens anyway because those changes are all system register or
11831 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11832 * emit code for one insn
11833 * emit code to clear PSTATE.SS
11834 * emit code to generate software step exception for completed step
11835 * end TB (as usual for having generated an exception)
11836 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11837 * emit code to generate a software step exception
11840 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
11841 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
11842 dc->is_ldex = false;
11843 if (!arm_feature(env, ARM_FEATURE_M)) {
11844 dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
11847 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
11849 /* If architectural single step active, limit to 1. */
11850 if (is_singlestepping(dc)) {
11851 dc->base.max_insns = 1;
11854 /* ARM is a fixed-length ISA. Bound the number of insns to execute
11855 to those left on the page. */
11857 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
11858 dc->base.max_insns = MIN(dc->base.max_insns, bound);
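/* -(pc_first | TARGET_PAGE_MASK) is the number of bytes left on the
 * current page (assuming the usual TARGET_PAGE_MASK of
 * ~(TARGET_PAGE_SIZE - 1)); e.g. with 4K pages and pc_first 0x8ff4
 * that is 12 bytes, so at most three more 4-byte A32 insns.
 */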
11861 cpu_V0 = tcg_temp_new_i64();
11862 cpu_V1 = tcg_temp_new_i64();
11863 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11864 cpu_M0 = tcg_temp_new_i64();
11867 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11869 DisasContext *dc = container_of(dcbase, DisasContext, base);
11871 /* A note on handling of the condexec (IT) bits:
11873 * We want to avoid the overhead of having to write the updated condexec
11874 * bits back to the CPUARMState for every instruction in an IT block. So:
11875 * (1) if the condexec bits are not already zero then we write
11876 * zero back into the CPUARMState now. This avoids complications trying
11877 * to do it at the end of the block. (For example if we don't do this
11878 * it's hard to identify whether we can safely skip writing condexec
11879 * at the end of the TB, which we definitely want to do for the case
11880 * where a TB doesn't do anything with the IT state at all.)
11881 * (2) if we are going to leave the TB then we call gen_set_condexec()
11882 * which will write the correct value into CPUARMState if zero is wrong.
11883 * This is done both for leaving the TB at the end, and for leaving
11884 * it because of an exception we know will happen, which is done in
11885 * gen_exception_insn(). The latter is necessary because we need to
11886 * leave the TB with the PC/IT state just prior to execution of the
11887 * instruction which caused the exception.
11888 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11889 * then the CPUARMState will be wrong and we need to reset it.
11890 * This is handled in the same way as restoration of the
11891 * PC in these situations; we save the value of the condexec bits
11892 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11893 * then uses this to restore them after an exception.
11895 * Note that there are no instructions which can read the condexec
11896 * bits, and none which can write non-static values to them, so
11897 * we don't need to care about whether CPUARMState is correct in the
11901 /* Reset the conditional execution bits immediately. This avoids
11902 complications trying to do it at the end of the block. */
11903 if (dc->condexec_mask || dc->condexec_cond) {
11904 TCGv_i32 tmp = tcg_temp_new_i32();
11905 tcg_gen_movi_i32(tmp, 0);
11906 store_cpu_field(tmp, condexec_bits);
11910 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11912 DisasContext *dc = container_of(dcbase, DisasContext, base);
11914 tcg_gen_insn_start(dc->base.pc_next,
11915 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11917 dc->insn_start = tcg_last_op();
11920 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
11921 const CPUBreakpoint *bp)
11923 DisasContext *dc = container_of(dcbase, DisasContext, base);
11925 if (bp->flags & BP_CPU) {
11926 gen_set_condexec(dc);
11927 gen_set_pc_im(dc, dc->base.pc_next);
11928 gen_helper_check_breakpoints(cpu_env);
11929 /* End the TB early; it's likely not going to be executed */
11930 dc->base.is_jmp = DISAS_TOO_MANY;
11932 gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
11933 /* The address covered by the breakpoint must be
11934 included in [tb->pc, tb->pc + tb->size) in order
11935 for it to be properly cleared -- thus we
11936 increment the PC here so that the logic setting
11937 tb->size below does the right thing. */
11938 /* TODO: Advance PC by correct instruction length to
11939 * avoid disassembler error messages */
11940 dc->base.pc_next += 2;
11941 dc->base.is_jmp = DISAS_NORETURN;
11947 static bool arm_pre_translate_insn(DisasContext *dc)
11949 #ifdef CONFIG_USER_ONLY
11950 /* Intercept jump to the magic kernel page. */
11951 if (dc->base.pc_next >= 0xffff0000) {
11952 /* We always get here via a jump, so know we are not in a
11953 conditional execution block. */
11954 gen_exception_internal(EXCP_KERNEL_TRAP);
11955 dc->base.is_jmp = DISAS_NORETURN;
11960 if (dc->ss_active && !dc->pstate_ss) {
11961 /* Singlestep state is Active-pending.
11962 * If we're in this state at the start of a TB then either
11963 * a) we just took an exception to an EL which is being debugged
11964 * and this is the first insn in the exception handler
11965 * b) debug exceptions were masked and we just unmasked them
11966 * without changing EL (eg by clearing PSTATE.D)
11967 * In either case we're going to take a swstep exception in the
11968 * "did not step an insn" case, and so the syndrome ISV and EX
11969 * bits should be zero.
11971 assert(dc->base.num_insns == 1);
11972 gen_swstep_exception(dc, 0, 0);
11973 dc->base.is_jmp = DISAS_NORETURN;
11980 static void arm_post_translate_insn(DisasContext *dc)
11982 if (dc->condjmp && !dc->base.is_jmp) {
11983 gen_set_label(dc->condlabel);
11986 translator_loop_temp_check(&dc->base);
11989 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
11991 DisasContext *dc = container_of(dcbase, DisasContext, base);
11992 CPUARMState *env = cpu->env_ptr;
11995 if (arm_pre_translate_insn(dc)) {
11999 dc->pc_curr = dc->base.pc_next;
12000 insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
12002 dc->base.pc_next += 4;
12003 disas_arm_insn(dc, insn);
12005 arm_post_translate_insn(dc);
12007 /* ARM is a fixed-length ISA. We performed the cross-page check
12008 in init_disas_context by adjusting max_insns. */
12011 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12013 /* Return true if this Thumb insn is always unconditional,
12014 * even inside an IT block. This is true of only a very few
12015 * instructions: BKPT, HLT, and SG.
12017 * A larger class of instructions are UNPREDICTABLE if used
12018 * inside an IT block; we do not need to detect those here, because
12019 * what we do by default (perform the cc check and update the IT
12020 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12021 * choice for those situations.
12023 * insn is either a 16-bit or a 32-bit instruction; the two are
12024 * distinguishable because for the 16-bit case the top 16 bits
12025 * are zeroes, and that isn't a valid 32-bit encoding.
12027 if ((insn & 0xffffff00) == 0xbe00) {
12032 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12033 !arm_dc_feature(s, ARM_FEATURE_M)) {
12034 /* HLT: v8A only. This is unconditional even when it is going to
12035 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12036 * For v7 cores this was a plain old undefined encoding and so
12037 * honours its cc check. (We might be using the encoding as
12038 * a semihosting trap, but we don't change the cc check behaviour
12039 * on that account, because a debugger connected to a real v7A
12040 * core and emulating semihosting traps by catching the UNDEF
12041 * exception would also only see cases where the cc check passed.
12042 * No guest code should be trying to do a HLT semihosting trap
12043 * in an IT block anyway.
12048 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12049 arm_dc_feature(s, ARM_FEATURE_M)) {
12057 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12059 DisasContext *dc = container_of(dcbase, DisasContext, base);
12060 CPUARMState *env = cpu->env_ptr;
12064 if (arm_pre_translate_insn(dc)) {
12068 dc->pc_curr = dc->base.pc_next;
12069 insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
12070 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
12071 dc->base.pc_next += 2;
12073 uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
12075 insn = insn << 16 | insn2;
12076 dc->base.pc_next += 2;
12080 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
12081 uint32_t cond = dc->condexec_cond;
12084 * Conditionally skip the insn. Note that both 0xe and 0xf mean
12085 * "always"; 0xf is not "never".
12088 arm_skip_unless(dc, cond);
12093 disas_thumb_insn(dc, insn);
12095 disas_thumb2_insn(dc, insn);
12098 /* Advance the Thumb condexec condition. */
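/* This mirrors the architectural ITSTATE advance: the next
 * condition LSB is taken from the top of the 5-bit mask, the mask
 * shifts left, and an all-zero mask means the IT block is finished.
 */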
12099 if (dc->condexec_mask) {
12100 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12101 ((dc->condexec_mask >> 4) & 1));
12102 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12103 if (dc->condexec_mask == 0) {
12104 dc->condexec_cond = 0;
12108 arm_post_translate_insn(dc);
12110 /* Thumb is a variable-length ISA. Stop translation when the next insn
12111 * will touch a new page. This ensures that prefetch aborts occur at
12114 * We want to stop the TB if the next insn starts in a new page,
12115 * or if it spans between this page and the next. This means that
12116 * if we're looking at the last halfword in the page we need to
12117 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12118 * or a 32-bit Thumb insn (which won't).
12119 * This is to avoid generating a silly TB with a single 16-bit insn
12120 * in it at the end of this page (which would execute correctly
12121 * but isn't very efficient).
12123 if (dc->base.is_jmp == DISAS_NEXT
12124 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
12125 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
12126 && insn_crosses_page(env, dc)))) {
12127 dc->base.is_jmp = DISAS_TOO_MANY;
12131 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
12133 DisasContext *dc = container_of(dcbase, DisasContext, base);
12135 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
12136 /* FIXME: This can theoretically happen with self-modifying code. */
12137 cpu_abort(cpu, "IO on conditional branch instruction");
12140 /* At this stage dc->condjmp will only be set when the skipped
12141 instruction was a conditional branch or trap, and the PC has
12142 already been written. */
12143 gen_set_condexec(dc);
12144 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
12145 /* Exception return branches need some special case code at the
12146 * end of the TB, which is complex enough that it has to
12147 * handle the single-step vs not and the condition-failed
12148 * insn codepath itself.
12150 gen_bx_excret_final_code(dc);
12151 } else if (unlikely(is_singlestepping(dc))) {
12152 /* Unconditional and "condition passed" instruction codepath. */
12153 switch (dc->base.is_jmp) {
12155 gen_ss_advance(dc);
12156 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12157 default_exception_el(dc));
12160 gen_ss_advance(dc);
12161 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12164 gen_ss_advance(dc);
12165 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12168 case DISAS_TOO_MANY:
12170 gen_set_pc_im(dc, dc->base.pc_next);
12173 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12174 gen_singlestep_exception(dc);
12176 case DISAS_NORETURN:
12180 /* While branches must always occur at the end of an IT block,
12181 there are a few other things that can cause us to terminate
12182 the TB in the middle of an IT block:
12183 - Exception generating instructions (bkpt, swi, undefined).
12185 - Hardware watchpoints.
12186 Hardware breakpoints have already been handled and skip this code.
12188 switch(dc->base.is_jmp) {
12190 case DISAS_TOO_MANY:
12191 gen_goto_tb(dc, 1, dc->base.pc_next);
12197 gen_set_pc_im(dc, dc->base.pc_next);
12200 /* indicate that the hash table must be used to find the next TB */
12201 tcg_gen_exit_tb(NULL, 0);
12203 case DISAS_NORETURN:
12204 /* nothing more to generate */
12208 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
12209 !(dc->insn & (1U << 31))) ? 2 : 4);
12211 gen_helper_wfi(cpu_env, tmp);
12212 tcg_temp_free_i32(tmp);
12213 /* The helper doesn't necessarily throw an exception, but we
12214 * must go back to the main loop to check for interrupts anyway.
12216 tcg_gen_exit_tb(NULL, 0);
12220 gen_helper_wfe(cpu_env);
12223 gen_helper_yield(cpu_env);
12226 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12227 default_exception_el(dc));
12230 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12233 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12239 /* "Condition failed" instruction codepath for the branch/trap insn */
12240 gen_set_label(dc->condlabel);
12241 gen_set_condexec(dc);
12242 if (unlikely(is_singlestepping(dc))) {
12243 gen_set_pc_im(dc, dc->base.pc_next);
12244 gen_singlestep_exception(dc);
12246 gen_goto_tb(dc, 1, dc->base.pc_next);
12251 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12253 DisasContext *dc = container_of(dcbase, DisasContext, base);
12255 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
12256 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
12259 static const TranslatorOps arm_translator_ops = {
12260 .init_disas_context = arm_tr_init_disas_context,
12261 .tb_start = arm_tr_tb_start,
12262 .insn_start = arm_tr_insn_start,
12263 .breakpoint_check = arm_tr_breakpoint_check,
12264 .translate_insn = arm_tr_translate_insn,
12265 .tb_stop = arm_tr_tb_stop,
12266 .disas_log = arm_tr_disas_log,
12269 static const TranslatorOps thumb_translator_ops = {
12270 .init_disas_context = arm_tr_init_disas_context,
12271 .tb_start = arm_tr_tb_start,
12272 .insn_start = arm_tr_insn_start,
12273 .breakpoint_check = arm_tr_breakpoint_check,
12274 .translate_insn = thumb_tr_translate_insn,
12275 .tb_stop = arm_tr_tb_stop,
12276 .disas_log = arm_tr_disas_log,
12279 /* generate intermediate code for basic block 'tb'. */
12280 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
12283 const TranslatorOps *ops = &arm_translator_ops;
12285 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
12286 ops = &thumb_translator_ops;
12288 #ifdef TARGET_AARCH64
12289 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
12290 ops = &aarch64_translator_ops;
12294 translator_loop(ops, &dc.base, cpu, tb, max_insns);
12297 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12298 target_ulong *data)
12302 env->condexec_bits = 0;
12303 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12305 env->regs[15] = data[0];
12306 env->condexec_bits = data[1];
12307 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
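/* These three words match what arm_tr_insn_start() emitted per insn:
 * data[0] is the PC, data[1] the packed IT bits
 * ((condexec_cond << 4) | (condexec_mask >> 1)), and data[2] the
 * per-insn syndrome word patched in via the insn_start op.
 */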