4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
28 #include "tcg-op-gvec.h"
30 #include "qemu/bitops.h"
32 #include "exec/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54 #include "translate.h"
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 /* FIXME: These should be removed. */
70 static TCGv_i32 cpu_F0s, cpu_F1s;
71 static TCGv_i64 cpu_F0d, cpu_F1d;
73 #include "exec/gen-icount.h"
75 static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
79 /* Function prototypes for gen_ functions calling Neon helpers. */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
83 /* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
    a64_translate_init();
}
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;
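/* Illustrative example (not from the original source): a 16-bit Thumb
 * store of r3 with acquire/release semantics would pass
 * (3 | ISSIsWrite | ISSIsAcqRel | ISSIs16Bit) as the issinfo argument,
 * while a callsite whose ISS info is only valid without writeback would
 * OR in ISSInvalid conditionally.
 */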
118 /* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
123 bool sse = memop & MO_SIGN;
124 bool is_acqrel = issinfo & ISSIsAcqRel;
125 bool is_write = issinfo & ISSIsWrite;
126 bool is_16bit = issinfo & ISSIs16Bit;
127 int srt = issinfo & ISSRegMask;
    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }
144 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
145 0, 0, 0, is_write, 0, is_16bit);
146 disas_set_insn_syndrome(s, syn);
149 static inline int get_a32_user_mem_index(DisasContext *s)
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
156 switch (s->mmu_idx) {
157 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
158 case ARMMMUIdx_S12NSE0:
159 case ARMMMUIdx_S12NSE1:
160 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
162 case ARMMMUIdx_S1SE0:
163 case ARMMMUIdx_S1SE1:
164 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
165 case ARMMMUIdx_MUser:
166 case ARMMMUIdx_MPriv:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
168 case ARMMMUIdx_MUserNegPri:
169 case ARMMMUIdx_MPrivNegPri:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
171 case ARMMMUIdx_MSUser:
172 case ARMMMUIdx_MSPriv:
173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
174 case ARMMMUIdx_MSUserNegPri:
175 case ARMMMUIdx_MSPrivNegPri:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    default:
        g_assert_not_reached();
    }
}
183 static inline TCGv_i32 load_cpu_offset(int offset)
185 TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}
190 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
192 static inline void store_cpu_offset(TCGv_i32 var, int offset)
194 tcg_gen_st_i32(var, cpu_env, offset);
195 tcg_temp_free_i32(var);
198 #define store_cpu_field(var, name) \
199 store_cpu_offset(var, offsetof(CPUARMState, name))
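/* Typical usage, as seen later in this file:
 *   store_cpu_field(tmp, condexec_bits);
 * stores tmp into env->condexec_bits and frees the temporary.
 */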
201 /* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        /* normally, since we updated PC, we need only to add one insn */
        uint32_t addr = (long)s->pc + (s->thumb ? 2 : 4);
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
217 /* Create a new temporary and set it to the value of a CPU register. */
218 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
220 TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
242 /* Value extensions. */
243 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
244 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
245 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
246 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
248 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
249 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
252 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
254 TCGv_i32 tmp_mask = tcg_const_i32(mask);
255 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
256 tcg_temp_free_i32(tmp_mask);
258 /* Set NZCV flags from the high 4 bits of var. */
259 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
261 static void gen_exception_internal(int excp)
263 TCGv_i32 tcg_excp = tcg_const_i32(excp);
265 assert(excp_is_internal(excp));
266 gen_helper_exception_internal(cpu_env, tcg_excp);
267 tcg_temp_free_i32(tcg_excp);
270 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
272 TCGv_i32 tcg_excp = tcg_const_i32(excp);
273 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
274 TCGv_i32 tcg_el = tcg_const_i32(target_el);
    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
279 tcg_temp_free_i32(tcg_el);
280 tcg_temp_free_i32(tcg_syn);
281 tcg_temp_free_i32(tcg_excp);
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
295 static void gen_step_complete_exception(DisasContext *s)
297 /* We just completed step of an insn. Move from Active-not-pending
298 * to Active-pending, and then also take the swstep exception.
299 * This corresponds to making the (IMPDEF) choice to prioritize
300 * swstep exceptions over asynchronous exceptions taken to an exception
301 * level where debug is disabled. This choice has the advantage that
302 * we do not need to maintain internal state corresponding to the
303 * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
308 default_exception_el(s));
309 s->base.is_jmp = DISAS_NORETURN;
312 static void gen_singlestep_exception(DisasContext *s)
314 /* Generate the right kind of exception for singlestep, which is
315 * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}
325 static inline bool is_singlestepping(DisasContext *s)
327 /* Return true if we are singlestepping either because of
328 * architectural singlestep or QEMU gdbstub singlestep. This does
329 * not include the command line '-singlestep' mode which is rather
330 * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}
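/* 16 x 16 -> 32 signed multiply of both halfwords: on return, a holds the
 * product of the low halfwords and b the product of the high halfwords.
 */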
336 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
338 TCGv_i32 tmp1 = tcg_temp_new_i32();
339 TCGv_i32 tmp2 = tcg_temp_new_i32();
340 tcg_gen_ext16s_i32(tmp1, a);
341 tcg_gen_ext16s_i32(tmp2, b);
342 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
343 tcg_temp_free_i32(tmp2);
344 tcg_gen_sari_i32(a, a, 16);
345 tcg_gen_sari_i32(b, b, 16);
346 tcg_gen_mul_i32(b, b, a);
347 tcg_gen_mov_i32(a, tmp1);
348 tcg_temp_free_i32(tmp1);
351 /* Byteswap each halfword. */
352 static void gen_rev16(TCGv_i32 var)
354 TCGv_i32 tmp = tcg_temp_new_i32();
355 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
356 tcg_gen_shri_i32(tmp, var, 8);
357 tcg_gen_and_i32(tmp, tmp, mask);
358 tcg_gen_and_i32(var, var, mask);
359 tcg_gen_shli_i32(var, var, 8);
360 tcg_gen_or_i32(var, var, tmp);
361 tcg_temp_free_i32(mask);
362 tcg_temp_free_i32(tmp);
365 /* Byteswap low halfword and sign extend. */
366 static void gen_revsh(TCGv_i32 var)
368 tcg_gen_ext16u_i32(var, var);
369 tcg_gen_bswap16_i32(var, var);
370 tcg_gen_ext16s_i32(var, var);
373 /* Return (b << 32) + a. Mark inputs as dead */
374 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
376 TCGv_i64 tmp64 = tcg_temp_new_i64();
378 tcg_gen_extu_i32_i64(tmp64, b);
379 tcg_temp_free_i32(b);
380 tcg_gen_shli_i64(tmp64, tmp64, 32);
381 tcg_gen_add_i64(a, tmp64, a);
    tcg_temp_free_i64(tmp64);
    return a;
}
387 /* Return (b << 32) - a. Mark inputs as dead. */
388 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
390 TCGv_i64 tmp64 = tcg_temp_new_i64();
392 tcg_gen_extu_i32_i64(tmp64, b);
393 tcg_temp_free_i32(b);
394 tcg_gen_shli_i64(tmp64, tmp64, 32);
395 tcg_gen_sub_i64(a, tmp64, a);
    tcg_temp_free_i64(tmp64);
    return a;
}
401 /* 32x32->64 multiply. Marks inputs as dead. */
402 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
404 TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;
408 tcg_gen_mulu2_i32(lo, hi, a, b);
409 tcg_temp_free_i32(a);
410 tcg_temp_free_i32(b);
412 ret = tcg_temp_new_i64();
413 tcg_gen_concat_i32_i64(ret, lo, hi);
414 tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
420 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
422 TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;
426 tcg_gen_muls2_i32(lo, hi, a, b);
427 tcg_temp_free_i32(a);
428 tcg_temp_free_i32(b);
430 ret = tcg_temp_new_i64();
431 tcg_gen_concat_i32_i64(ret, lo, hi);
432 tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
438 /* Swap low and high halfwords. */
439 static void gen_swap_half(TCGv_i32 var)
441 TCGv_i32 tmp = tcg_temp_new_i32();
442 tcg_gen_shri_i32(tmp, var, 16);
443 tcg_gen_shli_i32(var, var, 16);
444 tcg_gen_or_i32(var, var, tmp);
445 tcg_temp_free_i32(tmp);
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
455 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
457 TCGv_i32 tmp = tcg_temp_new_i32();
458 tcg_gen_xor_i32(tmp, t0, t1);
459 tcg_gen_andi_i32(tmp, tmp, 0x8000);
460 tcg_gen_andi_i32(t0, t0, ~0x8000);
461 tcg_gen_andi_i32(t1, t1, ~0x8000);
462 tcg_gen_add_i32(t0, t0, t1);
463 tcg_gen_xor_i32(t0, t0, tmp);
464 tcg_temp_free_i32(tmp);
465 tcg_temp_free_i32(t1);
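/* The masking above stops a carry out of bit 15 of the low halfword from
 * propagating into the high halfword; XORing tmp back in afterwards
 * restores the correct value of bit 15 in each half.
 */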
468 /* Set CF to the top bit of var. */
469 static void gen_set_CF_bit31(TCGv_i32 var)
471 tcg_gen_shri_i32(cpu_CF, var, 31);
474 /* Set N and Z flags from var. */
475 static inline void gen_logic_CC(TCGv_i32 var)
477 tcg_gen_mov_i32(cpu_NF, var);
478 tcg_gen_mov_i32(cpu_ZF, var);
482 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
484 tcg_gen_add_i32(t0, t0, t1);
485 tcg_gen_add_i32(t0, t0, cpu_CF);
488 /* dest = T0 + T1 + CF. */
489 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
491 tcg_gen_add_i32(dest, t0, t1);
492 tcg_gen_add_i32(dest, dest, cpu_CF);
495 /* dest = T0 - T1 + CF - 1. */
496 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
498 tcg_gen_sub_i32(dest, t0, t1);
499 tcg_gen_add_i32(dest, dest, cpu_CF);
500 tcg_gen_subi_i32(dest, dest, 1);
503 /* dest = T0 + T1. Compute C, N, V and Z flags */
504 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
506 TCGv_i32 tmp = tcg_temp_new_i32();
507 tcg_gen_movi_i32(tmp, 0);
508 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
509 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
510 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
511 tcg_gen_xor_i32(tmp, t0, t1);
512 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
513 tcg_temp_free_i32(tmp);
514 tcg_gen_mov_i32(dest, cpu_NF);
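/* In the sequence above, tcg_gen_add2_i32 leaves the 32-bit sum in cpu_NF
 * and the carry-out in cpu_CF; overflow (cpu_VF) is then computed as
 * (result ^ t0) & ~(t0 ^ t1), i.e. set when both operands have the same
 * sign but the result's sign differs.
 */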
517 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
518 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
520 TCGv_i32 tmp = tcg_temp_new_i32();
521 if (TCG_TARGET_HAS_add2_i32) {
522 tcg_gen_movi_i32(tmp, 0);
523 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
527 TCGv_i64 q1 = tcg_temp_new_i64();
528 tcg_gen_extu_i32_i64(q0, t0);
529 tcg_gen_extu_i32_i64(q1, t1);
530 tcg_gen_add_i64(q0, q0, q1);
531 tcg_gen_extu_i32_i64(q1, cpu_CF);
532 tcg_gen_add_i64(q0, q0, q1);
533 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
534 tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
538 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
539 tcg_gen_xor_i32(tmp, t0, t1);
540 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
541 tcg_temp_free_i32(tmp);
542 tcg_gen_mov_i32(dest, cpu_NF);
545 /* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
550 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
551 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
552 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
553 tmp = tcg_temp_new_i32();
554 tcg_gen_xor_i32(tmp, t0, t1);
555 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
556 tcg_temp_free_i32(tmp);
557 tcg_gen_mov_i32(dest, cpu_NF);
560 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
561 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
563 TCGv_i32 tmp = tcg_temp_new_i32();
564 tcg_gen_not_i32(tmp, t1);
565 gen_adc_CC(dest, t0, tmp);
566 tcg_temp_free_i32(tmp);
569 #define GEN_SHIFT(name) \
570 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
573 tmp1 = tcg_temp_new_i32(); \
574 tcg_gen_andi_i32(tmp1, t1, 0xff); \
575 tmp2 = tcg_const_i32(0); \
576 tmp3 = tcg_const_i32(0x1f); \
577 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
578 tcg_temp_free_i32(tmp3); \
579 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
580 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
581 tcg_temp_free_i32(tmp2); \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
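/* The movcond in GEN_SHIFT implements the A32 register-shift semantics:
 * only the bottom byte of the shift register is used, and any shift
 * amount of 32 or more yields 0, which a native TCG shift (defined only
 * for amounts 0..31) would not guarantee.
 */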
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
592 tcg_gen_andi_i32(tmp1, t1, 0xff);
593 tmp2 = tcg_const_i32(0x1f);
594 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
595 tcg_temp_free_i32(tmp2);
596 tcg_gen_sar_i32(dest, t0, tmp1);
597 tcg_temp_free_i32(tmp1);
600 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
602 TCGv_i32 c0 = tcg_const_i32(0);
603 TCGv_i32 tmp = tcg_temp_new_i32();
604 tcg_gen_neg_i32(tmp, src);
605 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
606 tcg_temp_free_i32(c0);
607 tcg_temp_free_i32(tmp);
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
622 /* Shift by immediate. Includes special handling for shift == 0. */
623 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
624 int shift, int flags)
630 shifter_out_im(var, 32 - shift);
631 tcg_gen_shli_i32(var, var, shift);
637 tcg_gen_shri_i32(cpu_CF, var, 31);
639 tcg_gen_movi_i32(var, 0);
642 shifter_out_im(var, shift - 1);
643 tcg_gen_shri_i32(var, var, shift);
650 shifter_out_im(var, shift - 1);
653 tcg_gen_sari_i32(var, var, shift);
655 case 3: /* ROR/RRX */
658 shifter_out_im(var, shift - 1);
659 tcg_gen_rotri_i32(var, var, shift); break;
661 TCGv_i32 tmp = tcg_temp_new_i32();
662 tcg_gen_shli_i32(tmp, cpu_CF, 31);
664 shifter_out_im(var, 0);
665 tcg_gen_shri_i32(var, var, 1);
666 tcg_gen_or_i32(var, var, tmp);
667 tcg_temp_free_i32(tmp);
672 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
678 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
679 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
680 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_shl(var, var, shift); break;
        case 1: gen_shr(var, var, shift); break;
        case 2: gen_sar(var, var, shift); break;
693 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
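/* Note: when flags are requested, the *_cc helpers also update CF in the
 * CPU state as a side effect, mirroring what gen_arm_shift_im does via
 * shifter_out_im for the immediate forms.
 */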
700 #define PAS_OP(pfx) \
702 case 0: gen_pas_helper(glue(pfx,add16)); break; \
703 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
704 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
705 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
706 case 4: gen_pas_helper(glue(pfx,add8)); break; \
707 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
709 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
714 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
716 tmp = tcg_temp_new_ptr();
717 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
719 tcg_temp_free_ptr(tmp);
722 tmp = tcg_temp_new_ptr();
723 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
725 tcg_temp_free_ptr(tmp);
727 #undef gen_pas_helper
728 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
741 #undef gen_pas_helper
746 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
747 #define PAS_OP(pfx) \
749 case 0: gen_pas_helper(glue(pfx,add8)); break; \
750 case 1: gen_pas_helper(glue(pfx,add16)); break; \
751 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
752 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
753 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
754 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
756 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
761 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
763 tmp = tcg_temp_new_ptr();
764 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
766 tcg_temp_free_ptr(tmp);
769 tmp = tcg_temp_new_ptr();
770 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
772 tcg_temp_free_ptr(tmp);
774 #undef gen_pas_helper
775 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
788 #undef gen_pas_helper
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and AArch64 targets.
 */
797 void arm_test_cc(DisasCompare *cmp, int cc)
828 case 8: /* hi: C && !Z */
829 case 9: /* ls: !C || Z -> !(C && !Z) */
831 value = tcg_temp_new_i32();
833 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
834 ZF is non-zero for !Z; so AND the two subexpressions. */
835 tcg_gen_neg_i32(value, cpu_CF);
836 tcg_gen_and_i32(value, value, cpu_ZF);
839 case 10: /* ge: N == V -> N ^ V == 0 */
840 case 11: /* lt: N != V -> N ^ V != 0 */
841 /* Since we're only interested in the sign bit, == 0 is >= 0. */
843 value = tcg_temp_new_i32();
845 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
848 case 12: /* gt: !Z && N == V */
849 case 13: /* le: Z || N != V */
851 value = tcg_temp_new_i32();
853 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
854 * the sign bit then AND with ZF to yield the result. */
855 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
856 tcg_gen_sari_i32(value, value, 31);
857 tcg_gen_andc_i32(value, cpu_ZF, value);
860 case 14: /* always */
861 case 15: /* always */
862 /* Use the ALWAYS condition, which will fold early.
863 * It doesn't matter what we use for the value. */
864 cond = TCG_COND_ALWAYS;
869 fprintf(stderr, "Bad condition code 0x%x\n", cc);
874 cond = tcg_invert_cond(cond);
880 cmp->value_global = global;
883 void arm_free_cc(DisasCompare *cmp)
885 if (!cmp->value_global) {
886 tcg_temp_free_i32(cmp->value);
890 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
892 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
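/* Typical use in the decoder (roughly): arm_gen_test_cc(cond ^ 1, label)
 * branches to 'label' when the condition fails, so the code emitted after
 * the call only runs when the instruction's condition passes.
 */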
903 static const uint8_t table_logic_cc[16] = {
922 static inline void gen_set_condexec(DisasContext *s)
924 if (s->condexec_mask) {
925 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
926 TCGv_i32 tmp = tcg_temp_new_i32();
927 tcg_gen_movi_i32(tmp, val);
928 store_cpu_field(tmp, condexec_bits);
932 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
934 tcg_gen_movi_i32(cpu_R[15], val);
937 /* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
943 if (s->thumb != (addr & 1)) {
944 tmp = tcg_temp_new_i32();
945 tcg_gen_movi_i32(tmp, addr & 1);
946 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
947 tcg_temp_free_i32(tmp);
949 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
952 /* Set PC and Thumb state from var. var is marked as dead. */
953 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
955 s->base.is_jmp = DISAS_JUMP;
956 tcg_gen_andi_i32(cpu_R[15], var, ~1);
957 tcg_gen_andi_i32(var, var, 1);
958 store_cpu_field(var, thumb);
961 /* Set PC and Thumb state from var. var is marked as dead.
962 * For M-profile CPUs, include logic to detect exception-return
963 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
966 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
968 /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
972 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
973 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
974 s->base.is_jmp = DISAS_BX_EXCRET;
978 static inline void gen_bx_excret_final_code(DisasContext *s)
980 /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;
984 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
985 /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }
992 /* Is the new PC value in the magic range indicating exception return? */
993 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
994 /* No: end the TB as we would for a DISAS_JMP */
995 if (is_singlestepping(s)) {
996 gen_singlestep_exception(s);
1000 gen_set_label(excret_label);
1001 /* Yes: this is an exception return.
1002 * At this point in runtime env->regs[15] and env->thumb will hold
1003 * the exception-return magic number, which do_v7m_exception_exit()
1004 * will read. Nothing else will be able to see those values because
1005 * the cpu-exec main loop guarantees that we will always go straight
1006 * from raising the exception to the exception-handling code.
1008 * gen_ss_advance(s) does nothing on M profile currently but
1009 * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
1016 static inline void gen_bxns(DisasContext *s, int rm)
1018 TCGv_i32 var = load_reg(s, rm);
1020 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1021 * we need to sync state before calling it, but:
1022 * - we don't need to do gen_set_pc_im() because the bxns helper will
1023 * always set the PC itself
1024 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1025 * unless it's outside an IT block or the last insn in an IT block,
1026 * so we know that condexec == 0 (already set at the top of the TB)
1027 * is correct in the non-UNPREDICTABLE cases, and we can choose
1028 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1030 gen_helper_v7m_bxns(cpu_env, var);
1031 tcg_temp_free_i32(var);
1032 s->base.is_jmp = DISAS_EXIT;
1035 static inline void gen_blxns(DisasContext *s, int rm)
1037 TCGv_i32 var = load_reg(s, rm);
1039 /* We don't need to sync condexec state, for the same reason as bxns.
1040 * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
1043 gen_set_pc_im(s, s->pc);
1044 gen_helper_v7m_blxns(cpu_env, var);
1045 tcg_temp_free_i32(var);
1046 s->base.is_jmp = DISAS_EXIT;
1049 /* Variant of store_reg which uses branch&exchange logic when storing
1050 to r15 in ARM architecture v7 and above. The source must be a temporary
1051 and will be marked as dead. */
1052 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
1061 /* Variant of store_reg which uses branch&exchange logic when storing
1062 * to r15 in ARM architecture v5T and above. This is used for storing
1063 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1064 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1065 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1067 if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
1074 #ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
1080 /* Abstractions of "generate code to do a guest load/store for
1081 * AArch32", where a vaddr is always 32 bits (and is zero
1082 * extended if we're a 64 bit core) and data is also
1083 * 32 bits unless specifically doing a 64 bit access.
1084 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
1088 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1090 TCGv addr = tcg_temp_new();
1091 tcg_gen_extu_i32_tl(addr, a32);
1093 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1094 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
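/* The XOR above implements BE32 ("word-invariant big-endian") addressing
 * for system-mode cores with SCTLR.B set: sub-word accesses have their
 * address XORed with 3 (bytes) or 2 (halfwords) so that they hit the
 * correct byte lanes within the big-endian word.
 */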
1100 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1101 int index, TCGMemOp opc)
1103 TCGv addr = gen_aa32_addr(s, a32, opc);
1104 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1105 tcg_temp_free(addr);
1108 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1109 int index, TCGMemOp opc)
1111 TCGv addr = gen_aa32_addr(s, a32, opc);
1112 tcg_gen_qemu_st_i32(val, addr, index, opc);
1113 tcg_temp_free(addr);
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1146 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1148 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1149 if (!IS_USER_ONLY && s->sctlr_b) {
1150 tcg_gen_rotri_i64(val, val, 32);
1154 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1155 int index, TCGMemOp opc)
1157 TCGv addr = gen_aa32_addr(s, a32, opc);
1158 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1159 gen_aa32_frob64(s, val);
1160 tcg_temp_free(addr);
1163 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1164 TCGv_i32 a32, int index)
1166 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1169 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1170 int index, TCGMemOp opc)
1172 TCGv addr = gen_aa32_addr(s, a32, opc);
1174 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1175 if (!IS_USER_ONLY && s->sctlr_b) {
1176 TCGv_i64 tmp = tcg_temp_new_i64();
1177 tcg_gen_rotri_i64(tmp, val, 32);
1178 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1179 tcg_temp_free_i64(tmp);
1181 tcg_gen_qemu_st_i64(val, addr, index, opc);
1183 tcg_temp_free(addr);
1186 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1187 TCGv_i32 a32, int index)
1189 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1192 DO_GEN_LD(8s, MO_SB)
1193 DO_GEN_LD(8u, MO_UB)
1194 DO_GEN_LD(16s, MO_SW)
1195 DO_GEN_LD(16u, MO_UW)
1196 DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
1199 DO_GEN_ST(32, MO_UL)
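/* The macros above expand to accessors such as gen_aa32_ld16u() and
 * gen_aa32_st32(), used later as e.g.
 *   gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
 * plus *_iss variants that additionally record syndrome information.
 */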
1201 static inline void gen_hvc(DisasContext *s, int imm16)
1203 /* The pre HVC helper handles cases when HVC gets trapped
1204 * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
1207 gen_set_pc_im(s, s->pc - 4);
1208 gen_helper_pre_hvc(cpu_env);
1209 /* Otherwise we will treat this as a real exception which
1210 * happens after execution of the insn. (The distinction matters
1211 * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
1215 gen_set_pc_im(s, s->pc);
1216 s->base.is_jmp = DISAS_HVC;
1219 static inline void gen_smc(DisasContext *s)
1221 /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;
1226 gen_set_pc_im(s, s->pc - 4);
1227 tmp = tcg_const_i32(syn_aa32_smc());
1228 gen_helper_pre_smc(cpu_env, tmp);
1229 tcg_temp_free_i32(tmp);
1230 gen_set_pc_im(s, s->pc);
1231 s->base.is_jmp = DISAS_SMC;
1234 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1236 gen_set_condexec(s);
1237 gen_set_pc_im(s, s->pc - offset);
1238 gen_exception_internal(excp);
1239 s->base.is_jmp = DISAS_NORETURN;
1242 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1243 int syn, uint32_t target_el)
1245 gen_set_condexec(s);
1246 gen_set_pc_im(s, s->pc - offset);
1247 gen_exception(excp, syn, target_el);
1248 s->base.is_jmp = DISAS_NORETURN;
static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
1256 gen_set_pc_im(s, s->pc - offset);
1257 tcg_syn = tcg_const_i32(syn);
1258 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1259 tcg_temp_free_i32(tcg_syn);
1260 s->base.is_jmp = DISAS_NORETURN;
1263 /* Force a TB lookup after an instruction that changes the CPU state. */
1264 static inline void gen_lookup_tb(DisasContext *s)
1266 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1267 s->base.is_jmp = DISAS_EXIT;
1270 static inline void gen_hlt(DisasContext *s, int imm)
1272 /* HLT. This has two purposes.
1273 * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * the architecture requires when halting debug is disabled: it UNDEFs.
1276 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1277 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1278 * must trigger semihosting even for ARMv7 and earlier, where
1279 * HLT was an undefined encoding.
1280 * In system mode, we don't allow userspace access to
1281 * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
1284 if (semihosting_enabled() &&
1285 #ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }
1293 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1294 default_exception_el(s));
1297 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
    int val, rm, shift, shiftop;
    TCGv_i32 offset;
    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = insn & 0xf;
1313 shift = (insn >> 7) & 0x1f;
1314 shiftop = (insn >> 5) & 3;
1315 offset = load_reg(s, rm);
1316 gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
1321 tcg_temp_free_i32(offset);
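/* In the A32 single data transfer encoding decoded above, bit 25 selects
 * an immediate versus a (possibly shifted) register offset, and bit 23
 * gives the direction: set means add the offset, clear means subtract it.
 */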
1325 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;
1331 if (insn & (1 << 22)) {
1333 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
1349 tcg_temp_free_i32(offset);
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
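/* Neon arithmetic always uses the "standard FPSCR" value (flush-to-zero,
 * default-NaN), hence the separate standard_fp_status chosen when the
 * 'neon' argument is nonzero; VFP instructions use the normal fp_status.
 */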
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}
1385 static inline void gen_vfp_F1_mul(int dp)
1387 /* Like gen_vfp_mul() but put result in F1 */
1388 TCGv_ptr fpst = get_fpstatus_ptr(0);
1390 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1392 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1394 tcg_temp_free_ptr(fpst);
1397 static inline void gen_vfp_F1_neg(int dp)
1399 /* Like gen_vfp_neg() but put result in F1 */
1401 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1403 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1407 static inline void gen_vfp_abs(int dp)
1410 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1412 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1415 static inline void gen_vfp_neg(int dp)
1418 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1420 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1423 static inline void gen_vfp_sqrt(int dp)
1426 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1428 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1431 static inline void gen_vfp_cmp(int dp)
1434 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1436 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1439 static inline void gen_vfp_cmpe(int dp)
1442 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1444 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1447 static inline void gen_vfp_F1_ld0(int dp)
1450 tcg_gen_movi_i64(cpu_F1d, 0);
1452 tcg_gen_movi_i32(cpu_F1s, 0);
1455 #define VFP_GEN_ITOF(name) \
1456 static inline void gen_vfp_##name(int dp, int neon) \
1458 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1460 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1462 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1464 tcg_temp_free_ptr(statusptr); \
1471 #define VFP_GEN_FTOI(name) \
1472 static inline void gen_vfp_##name(int dp, int neon) \
1474 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1476 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1478 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1480 tcg_temp_free_ptr(statusptr); \
1489 #define VFP_GEN_FIX(name, round) \
1490 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1492 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1493 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1495 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1498 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1501 tcg_temp_free_i32(tmp_shift); \
1502 tcg_temp_free_ptr(statusptr); \
1504 VFP_GEN_FIX(tosh, _round_to_zero)
1505 VFP_GEN_FIX(tosl, _round_to_zero)
1506 VFP_GEN_FIX(touh, _round_to_zero)
1507 VFP_GEN_FIX(toul, _round_to_zero)
1514 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1517 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1519 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1523 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1526 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1528 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        ofs += (reg & 1) ? offsetof(CPU_DoubleU, l.upper)
                         : offsetof(CPU_DoubleU, l.lower);
        return ofs;
    }
}
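/* With the SVE-style register file, D registers live in zregs[reg >> 1],
 * two to a vector register, and each S register is one 32-bit half of the
 * corresponding D register, selected by the l.upper/l.lower offsets above.
 */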
1547 /* Return the offset of a 32-bit piece of a NEON register.
1548 zero is the least significant end of the register. */
static inline long neon_reg_offset(int reg, int n)
{
    int sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
1557 static TCGv_i32 neon_load_reg(int reg, int pass)
1559 TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1564 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1566 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1567 tcg_temp_free_i32(var);
1570 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1572 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1575 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1577 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1580 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1582 TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}
1587 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1588 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1589 #define tcg_gen_st_f32 tcg_gen_st_i32
1590 #define tcg_gen_st_f64 tcg_gen_st_i64
1592 static inline void gen_mov_F0_vreg(int dp, int reg)
1595 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1597 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1600 static inline void gen_mov_F1_vreg(int dp, int reg)
1603 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1605 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1608 static inline void gen_mov_vreg_F0(int dp, int reg)
1611 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1613 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1616 #define ARM_CP_RW_BIT (1 << 20)
1618 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1620 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1623 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1625 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1628 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1630 TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}
1635 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1637 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1638 tcg_temp_free_i32(var);
1641 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1643 iwmmxt_store_reg(cpu_M0, rn);
1646 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1648 iwmmxt_load_reg(cpu_M0, rn);
1651 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1653 iwmmxt_load_reg(cpu_V1, rn);
1654 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1657 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1659 iwmmxt_load_reg(cpu_V1, rn);
1660 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1663 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1665 iwmmxt_load_reg(cpu_V1, rn);
1666 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1669 #define IWMMXT_OP(name) \
1670 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1672 iwmmxt_load_reg(cpu_V1, rn); \
1673 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1676 #define IWMMXT_OP_ENV(name) \
1677 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1679 iwmmxt_load_reg(cpu_V1, rn); \
1680 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1683 #define IWMMXT_OP_ENV_SIZE(name) \
1684 IWMMXT_OP_ENV(name##b) \
1685 IWMMXT_OP_ENV(name##w) \
1686 IWMMXT_OP_ENV(name##l)
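/* For example, IWMMXT_OP_ENV_SIZE(unpackl) below defines
 * gen_op_iwmmxt_unpacklb_M0_wRn(), ..._unpacklw_..., and ..._unpackll_...,
 * each loading wRn into cpu_V1 and calling the matching iwMMXt helper
 * with cpu_env.
 */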
1688 #define IWMMXT_OP_ENV1(name) \
1689 static inline void gen_op_iwmmxt_##name##_M0(void) \
1691 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1705 IWMMXT_OP_ENV_SIZE(unpackl)
1706 IWMMXT_OP_ENV_SIZE(unpackh)
1708 IWMMXT_OP_ENV1(unpacklub)
1709 IWMMXT_OP_ENV1(unpackluw)
1710 IWMMXT_OP_ENV1(unpacklul)
1711 IWMMXT_OP_ENV1(unpackhub)
1712 IWMMXT_OP_ENV1(unpackhuw)
1713 IWMMXT_OP_ENV1(unpackhul)
1714 IWMMXT_OP_ENV1(unpacklsb)
1715 IWMMXT_OP_ENV1(unpacklsw)
1716 IWMMXT_OP_ENV1(unpacklsl)
1717 IWMMXT_OP_ENV1(unpackhsb)
1718 IWMMXT_OP_ENV1(unpackhsw)
1719 IWMMXT_OP_ENV1(unpackhsl)
1721 IWMMXT_OP_ENV_SIZE(cmpeq)
1722 IWMMXT_OP_ENV_SIZE(cmpgtu)
1723 IWMMXT_OP_ENV_SIZE(cmpgts)
1725 IWMMXT_OP_ENV_SIZE(mins)
1726 IWMMXT_OP_ENV_SIZE(minu)
1727 IWMMXT_OP_ENV_SIZE(maxs)
1728 IWMMXT_OP_ENV_SIZE(maxu)
1730 IWMMXT_OP_ENV_SIZE(subn)
1731 IWMMXT_OP_ENV_SIZE(addn)
1732 IWMMXT_OP_ENV_SIZE(subu)
1733 IWMMXT_OP_ENV_SIZE(addu)
1734 IWMMXT_OP_ENV_SIZE(subs)
1735 IWMMXT_OP_ENV_SIZE(adds)
1737 IWMMXT_OP_ENV(avgb0)
1738 IWMMXT_OP_ENV(avgb1)
1739 IWMMXT_OP_ENV(avgw0)
1740 IWMMXT_OP_ENV(avgw1)
1742 IWMMXT_OP_ENV(packuw)
1743 IWMMXT_OP_ENV(packul)
1744 IWMMXT_OP_ENV(packuq)
1745 IWMMXT_OP_ENV(packsw)
1746 IWMMXT_OP_ENV(packsl)
1747 IWMMXT_OP_ENV(packsq)
1749 static void gen_op_iwmmxt_set_mup(void)
1752 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1753 tcg_gen_ori_i32(tmp, tmp, 2);
1754 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1757 static void gen_op_iwmmxt_set_cup(void)
1760 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1761 tcg_gen_ori_i32(tmp, tmp, 1);
1762 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
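/* The two helpers above track updates in wCon: gen_op_iwmmxt_set_mup()
 * sets bit 1 (a wMMX data register was modified) and
 * gen_op_iwmmxt_set_cup() sets bit 0 (a control register was modified).
 */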
1765 static void gen_op_iwmmxt_setpsr_nz(void)
1767 TCGv_i32 tmp = tcg_temp_new_i32();
1768 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1769 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1772 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1774 iwmmxt_load_reg(cpu_V1, rn);
1775 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1776 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1779 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1786 rd = (insn >> 16) & 0xf;
1787 tmp = load_reg(s, rd);
1789 offset = (insn & 0xff) << ((insn >> 7) & 2);
1790 if (insn & (1 << 24)) {
1792 if (insn & (1 << 23))
1793 tcg_gen_addi_i32(tmp, tmp, offset);
1795 tcg_gen_addi_i32(tmp, tmp, -offset);
1796 tcg_gen_mov_i32(dest, tmp);
1797 if (insn & (1 << 21))
1798 store_reg(s, rd, tmp);
1800 tcg_temp_free_i32(tmp);
1801 } else if (insn & (1 << 21)) {
1803 tcg_gen_mov_i32(dest, tmp);
1804 if (insn & (1 << 23))
1805 tcg_gen_addi_i32(tmp, tmp, offset);
1807 tcg_gen_addi_i32(tmp, tmp, -offset);
1808 store_reg(s, rd, tmp);
1809 } else if (!(insn & (1 << 23)))
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;
1819 if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        }
        tmp = iwmmxt_load_creg(rd);
    } else {
        tmp = tcg_temp_new_i32();
1827 iwmmxt_load_reg(cpu_V0, rd);
1828 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
1831 tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1836 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1837 (ie. an undefined instruction). */
1838 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1841 int rdhi, rdlo, rd0, rd1, i;
1843 TCGv_i32 tmp, tmp2, tmp3;
1845 if ((insn & 0x0e000e00) == 0x0c000000) {
1846 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1848 rdlo = (insn >> 12) & 0xf;
1849 rdhi = (insn >> 16) & 0xf;
1850 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1851 iwmmxt_load_reg(cpu_V0, wrd);
1852 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1853 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1854 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1855 } else { /* TMCRR */
1856 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1857 iwmmxt_store_reg(cpu_V0, wrd);
1858 gen_op_iwmmxt_set_mup();
1863 wrd = (insn >> 12) & 0xf;
1864 addr = tcg_temp_new_i32();
1865 if (gen_iwmmxt_address(s, insn, addr)) {
1866 tcg_temp_free_i32(addr);
1869 if (insn & ARM_CP_RW_BIT) {
1870 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1871 tmp = tcg_temp_new_i32();
1872 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1873 iwmmxt_store_creg(wrd, tmp);
1876 if (insn & (1 << 8)) {
1877 if (insn & (1 << 22)) { /* WLDRD */
1878 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1880 } else { /* WLDRW wRd */
1881 tmp = tcg_temp_new_i32();
1882 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1885 tmp = tcg_temp_new_i32();
1886 if (insn & (1 << 22)) { /* WLDRH */
1887 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1888 } else { /* WLDRB */
1889 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1893 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1894 tcg_temp_free_i32(tmp);
1896 gen_op_iwmmxt_movq_wRn_M0(wrd);
1899 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1900 tmp = iwmmxt_load_creg(wrd);
1901 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1903 gen_op_iwmmxt_movq_M0_wRn(wrd);
1904 tmp = tcg_temp_new_i32();
1905 if (insn & (1 << 8)) {
1906 if (insn & (1 << 22)) { /* WSTRD */
1907 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1908 } else { /* WSTRW wRd */
1909 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1910 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1913 if (insn & (1 << 22)) { /* WSTRH */
1914 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1915 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1916 } else { /* WSTRB */
1917 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1918 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1922 tcg_temp_free_i32(tmp);
1924 tcg_temp_free_i32(addr);
1928 if ((insn & 0x0f000000) != 0x0e000000)
1931 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1932 case 0x000: /* WOR */
1933 wrd = (insn >> 12) & 0xf;
1934 rd0 = (insn >> 0) & 0xf;
1935 rd1 = (insn >> 16) & 0xf;
1936 gen_op_iwmmxt_movq_M0_wRn(rd0);
1937 gen_op_iwmmxt_orq_M0_wRn(rd1);
1938 gen_op_iwmmxt_setpsr_nz();
1939 gen_op_iwmmxt_movq_wRn_M0(wrd);
1940 gen_op_iwmmxt_set_mup();
1941 gen_op_iwmmxt_set_cup();
1943 case 0x011: /* TMCR */
1946 rd = (insn >> 12) & 0xf;
1947 wrd = (insn >> 16) & 0xf;
1949 case ARM_IWMMXT_wCID:
1950 case ARM_IWMMXT_wCASF:
1952 case ARM_IWMMXT_wCon:
1953 gen_op_iwmmxt_set_cup();
1955 case ARM_IWMMXT_wCSSF:
1956 tmp = iwmmxt_load_creg(wrd);
1957 tmp2 = load_reg(s, rd);
1958 tcg_gen_andc_i32(tmp, tmp, tmp2);
1959 tcg_temp_free_i32(tmp2);
1960 iwmmxt_store_creg(wrd, tmp);
1962 case ARM_IWMMXT_wCGR0:
1963 case ARM_IWMMXT_wCGR1:
1964 case ARM_IWMMXT_wCGR2:
1965 case ARM_IWMMXT_wCGR3:
1966 gen_op_iwmmxt_set_cup();
1967 tmp = load_reg(s, rd);
1968 iwmmxt_store_creg(wrd, tmp);
1974 case 0x100: /* WXOR */
1975 wrd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 0) & 0xf;
1977 rd1 = (insn >> 16) & 0xf;
1978 gen_op_iwmmxt_movq_M0_wRn(rd0);
1979 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1980 gen_op_iwmmxt_setpsr_nz();
1981 gen_op_iwmmxt_movq_wRn_M0(wrd);
1982 gen_op_iwmmxt_set_mup();
1983 gen_op_iwmmxt_set_cup();
1985 case 0x111: /* TMRC */
1988 rd = (insn >> 12) & 0xf;
1989 wrd = (insn >> 16) & 0xf;
1990 tmp = iwmmxt_load_creg(wrd);
1991 store_reg(s, rd, tmp);
1993 case 0x300: /* WANDN */
1994 wrd = (insn >> 12) & 0xf;
1995 rd0 = (insn >> 0) & 0xf;
1996 rd1 = (insn >> 16) & 0xf;
1997 gen_op_iwmmxt_movq_M0_wRn(rd0);
1998 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1999 gen_op_iwmmxt_andq_M0_wRn(rd1);
2000 gen_op_iwmmxt_setpsr_nz();
2001 gen_op_iwmmxt_movq_wRn_M0(wrd);
2002 gen_op_iwmmxt_set_mup();
2003 gen_op_iwmmxt_set_cup();
2005 case 0x200: /* WAND */
2006 wrd = (insn >> 12) & 0xf;
2007 rd0 = (insn >> 0) & 0xf;
2008 rd1 = (insn >> 16) & 0xf;
2009 gen_op_iwmmxt_movq_M0_wRn(rd0);
2010 gen_op_iwmmxt_andq_M0_wRn(rd1);
2011 gen_op_iwmmxt_setpsr_nz();
2012 gen_op_iwmmxt_movq_wRn_M0(wrd);
2013 gen_op_iwmmxt_set_mup();
2014 gen_op_iwmmxt_set_cup();
2016 case 0x810: case 0xa10: /* WMADD */
2017 wrd = (insn >> 12) & 0xf;
2018 rd0 = (insn >> 0) & 0xf;
2019 rd1 = (insn >> 16) & 0xf;
2020 gen_op_iwmmxt_movq_M0_wRn(rd0);
2021 if (insn & (1 << 21))
2022 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2024 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2025 gen_op_iwmmxt_movq_wRn_M0(wrd);
2026 gen_op_iwmmxt_set_mup();
2028 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2029 wrd = (insn >> 12) & 0xf;
2030 rd0 = (insn >> 16) & 0xf;
2031 rd1 = (insn >> 0) & 0xf;
2032 gen_op_iwmmxt_movq_M0_wRn(rd0);
2033 switch ((insn >> 22) & 3) {
2035 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2038 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2041 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 gen_op_iwmmxt_set_cup();
2050 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2051 wrd = (insn >> 12) & 0xf;
2052 rd0 = (insn >> 16) & 0xf;
2053 rd1 = (insn >> 0) & 0xf;
2054 gen_op_iwmmxt_movq_M0_wRn(rd0);
2055 switch ((insn >> 22) & 3) {
2057 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2060 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2063 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2068 gen_op_iwmmxt_movq_wRn_M0(wrd);
2069 gen_op_iwmmxt_set_mup();
2070 gen_op_iwmmxt_set_cup();
2072 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2073 wrd = (insn >> 12) & 0xf;
2074 rd0 = (insn >> 16) & 0xf;
2075 rd1 = (insn >> 0) & 0xf;
2076 gen_op_iwmmxt_movq_M0_wRn(rd0);
2077 if (insn & (1 << 22))
2078 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2080 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2081 if (!(insn & (1 << 20)))
2082 gen_op_iwmmxt_addl_M0_wRn(wrd);
2083 gen_op_iwmmxt_movq_wRn_M0(wrd);
2084 gen_op_iwmmxt_set_mup();
2086 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 rd1 = (insn >> 0) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 if (insn & (1 << 21)) {
2092 if (insn & (1 << 20))
2093 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2095 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2097 if (insn & (1 << 20))
2098 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2100 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2102 gen_op_iwmmxt_movq_wRn_M0(wrd);
2103 gen_op_iwmmxt_set_mup();
2105 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2106 wrd = (insn >> 12) & 0xf;
2107 rd0 = (insn >> 16) & 0xf;
2108 rd1 = (insn >> 0) & 0xf;
2109 gen_op_iwmmxt_movq_M0_wRn(rd0);
2110 if (insn & (1 << 21))
2111 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2113 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2114 if (!(insn & (1 << 20))) {
2115 iwmmxt_load_reg(cpu_V1, wrd);
2116 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2118 gen_op_iwmmxt_movq_wRn_M0(wrd);
2119 gen_op_iwmmxt_set_mup();
2121 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2122 wrd = (insn >> 12) & 0xf;
2123 rd0 = (insn >> 16) & 0xf;
2124 rd1 = (insn >> 0) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 switch ((insn >> 22) & 3) {
2128 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2131 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2134 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2143 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2144 wrd = (insn >> 12) & 0xf;
2145 rd0 = (insn >> 16) & 0xf;
2146 rd1 = (insn >> 0) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 if (insn & (1 << 22)) {
2149 if (insn & (1 << 20))
2150 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2152 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2154 if (insn & (1 << 20))
2155 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2157 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2159 gen_op_iwmmxt_movq_wRn_M0(wrd);
2160 gen_op_iwmmxt_set_mup();
2161 gen_op_iwmmxt_set_cup();
2163 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2164 wrd = (insn >> 12) & 0xf;
2165 rd0 = (insn >> 16) & 0xf;
2166 rd1 = (insn >> 0) & 0xf;
2167 gen_op_iwmmxt_movq_M0_wRn(rd0);
2168 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2169 tcg_gen_andi_i32(tmp, tmp, 7);
2170 iwmmxt_load_reg(cpu_V1, rd1);
2171 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2172 tcg_temp_free_i32(tmp);
2173 gen_op_iwmmxt_movq_wRn_M0(wrd);
2174 gen_op_iwmmxt_set_mup();
2176 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2177 if (((insn >> 6) & 3) == 3)
2179 rd = (insn >> 12) & 0xf;
2180 wrd = (insn >> 16) & 0xf;
2181 tmp = load_reg(s, rd);
2182 gen_op_iwmmxt_movq_M0_wRn(wrd);
2183 switch ((insn >> 6) & 3) {
2185 tmp2 = tcg_const_i32(0xff);
2186 tmp3 = tcg_const_i32((insn & 7) << 3);
2189 tmp2 = tcg_const_i32(0xffff);
2190 tmp3 = tcg_const_i32((insn & 3) << 4);
2193 tmp2 = tcg_const_i32(0xffffffff);
2194 tmp3 = tcg_const_i32((insn & 1) << 5);
2200 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2201 tcg_temp_free_i32(tmp3);
2202 tcg_temp_free_i32(tmp2);
2203 tcg_temp_free_i32(tmp);
2204 gen_op_iwmmxt_movq_wRn_M0(wrd);
2205 gen_op_iwmmxt_set_mup();
2207 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2208 rd = (insn >> 12) & 0xf;
2209 wrd = (insn >> 16) & 0xf;
2210 if (rd == 15 || ((insn >> 22) & 3) == 3)
2212 gen_op_iwmmxt_movq_M0_wRn(wrd);
2213 tmp = tcg_temp_new_i32();
2214 switch ((insn >> 22) & 3) {
2216 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2217 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2219 tcg_gen_ext8s_i32(tmp, tmp);
2221 tcg_gen_andi_i32(tmp, tmp, 0xff);
2225 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2226 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2228 tcg_gen_ext16s_i32(tmp, tmp);
2230 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2234 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2235 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2238 store_reg(s, rd, tmp);
2240 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2241 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2243 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2244 switch ((insn >> 22) & 3) {
2246 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2249 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2252 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2255 tcg_gen_shli_i32(tmp, tmp, 28);
2257 tcg_temp_free_i32(tmp);
2259 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2260 if (((insn >> 6) & 3) == 3)
2262 rd = (insn >> 12) & 0xf;
2263 wrd = (insn >> 16) & 0xf;
2264 tmp = load_reg(s, rd);
2265 switch ((insn >> 6) & 3) {
2267 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2270 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2273 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2276 tcg_temp_free_i32(tmp);
2277 gen_op_iwmmxt_movq_wRn_M0(wrd);
2278 gen_op_iwmmxt_set_mup();
2280 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2281 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2283 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2284 tmp2 = tcg_temp_new_i32();
2285 tcg_gen_mov_i32(tmp2, tmp);
2286 switch ((insn >> 22) & 3) {
2288 for (i = 0; i < 7; i ++) {
2289 tcg_gen_shli_i32(tmp2, tmp2, 4);
2290 tcg_gen_and_i32(tmp, tmp, tmp2);
2294 for (i = 0; i < 3; i ++) {
2295 tcg_gen_shli_i32(tmp2, tmp2, 8);
2296 tcg_gen_and_i32(tmp, tmp, tmp2);
2300 tcg_gen_shli_i32(tmp2, tmp2, 16);
2301 tcg_gen_and_i32(tmp, tmp, tmp2);
2305 tcg_temp_free_i32(tmp2);
2306 tcg_temp_free_i32(tmp);
2308 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2309 wrd = (insn >> 12) & 0xf;
2310 rd0 = (insn >> 16) & 0xf;
2311 gen_op_iwmmxt_movq_M0_wRn(rd0);
2312 switch ((insn >> 22) & 3) {
2314 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2317 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2320 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2328 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2329 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2331 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2332 tmp2 = tcg_temp_new_i32();
2333 tcg_gen_mov_i32(tmp2, tmp);
2334 switch ((insn >> 22) & 3) {
2336 for (i = 0; i < 7; i ++) {
2337 tcg_gen_shli_i32(tmp2, tmp2, 4);
2338 tcg_gen_or_i32(tmp, tmp, tmp2);
2342 for (i = 0; i < 3; i ++) {
2343 tcg_gen_shli_i32(tmp2, tmp2, 8);
2344 tcg_gen_or_i32(tmp, tmp, tmp2);
2348 tcg_gen_shli_i32(tmp2, tmp2, 16);
2349 tcg_gen_or_i32(tmp, tmp, tmp2);
2353 tcg_temp_free_i32(tmp2);
2354 tcg_temp_free_i32(tmp);
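/* TMOVMSK: gather the most significant bit of each element of wRn into a
 * bit mask and write it to ARM register rd.
 */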
2356 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2357 rd = (insn >> 12) & 0xf;
2358 rd0 = (insn >> 16) & 0xf;
2359 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2361 gen_op_iwmmxt_movq_M0_wRn(rd0);
2362 tmp = tcg_temp_new_i32();
2363 switch ((insn >> 22) & 3) {
2365 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2368 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2371 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2374 store_reg(s, rd, tmp);
2376 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2377 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2378 wrd = (insn >> 12) & 0xf;
2379 rd0 = (insn >> 16) & 0xf;
2380 rd1 = (insn >> 0) & 0xf;
2381 gen_op_iwmmxt_movq_M0_wRn(rd0);
2382 switch ((insn >> 22) & 3) {
2384 if (insn & (1 << 21))
2385 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2387 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2390 if (insn & (1 << 21))
2391 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2393 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2396 if (insn & (1 << 21))
2397 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2399 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2404 gen_op_iwmmxt_movq_wRn_M0(wrd);
2405 gen_op_iwmmxt_set_mup();
2406 gen_op_iwmmxt_set_cup();
2408 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2409 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2410 wrd = (insn >> 12) & 0xf;
2411 rd0 = (insn >> 16) & 0xf;
2412 gen_op_iwmmxt_movq_M0_wRn(rd0);
2413 switch ((insn >> 22) & 3) {
2415 if (insn & (1 << 21))
2416 gen_op_iwmmxt_unpacklsb_M0();
2418 gen_op_iwmmxt_unpacklub_M0();
2421 if (insn & (1 << 21))
2422 gen_op_iwmmxt_unpacklsw_M0();
2424 gen_op_iwmmxt_unpackluw_M0();
2427 if (insn & (1 << 21))
2428 gen_op_iwmmxt_unpacklsl_M0();
2430 gen_op_iwmmxt_unpacklul_M0();
2435 gen_op_iwmmxt_movq_wRn_M0(wrd);
2436 gen_op_iwmmxt_set_mup();
2437 gen_op_iwmmxt_set_cup();
2439 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2440 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2441 wrd = (insn >> 12) & 0xf;
2442 rd0 = (insn >> 16) & 0xf;
2443 gen_op_iwmmxt_movq_M0_wRn(rd0);
2444 switch ((insn >> 22) & 3) {
2446 if (insn & (1 << 21))
2447 gen_op_iwmmxt_unpackhsb_M0();
2449 gen_op_iwmmxt_unpackhub_M0();
2452 if (insn & (1 << 21))
2453 gen_op_iwmmxt_unpackhsw_M0();
2455 gen_op_iwmmxt_unpackhuw_M0();
2458 if (insn & (1 << 21))
2459 gen_op_iwmmxt_unpackhsl_M0();
2461 gen_op_iwmmxt_unpackhul_M0();
2466 gen_op_iwmmxt_movq_wRn_M0(wrd);
2467 gen_op_iwmmxt_set_mup();
2468 gen_op_iwmmxt_set_cup();
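/* WSRL/WSRA/WSLL/WROR: the shift count is fetched by gen_iwmmxt_shift()
 * (from a core register or a wCGR control register) and masked to the
 * element width; a nonzero return marks an invalid encoding.  Byte-sized
 * shifts do not exist, hence the size==0 checks below.
 */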
2470 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2471 case 0x214: case 0x614: case 0xa14: case 0xe14:
2472 if (((insn >> 22) & 3) == 0)
2474 wrd = (insn >> 12) & 0xf;
2475 rd0 = (insn >> 16) & 0xf;
2476 gen_op_iwmmxt_movq_M0_wRn(rd0);
2477 tmp = tcg_temp_new_i32();
2478 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2479 tcg_temp_free_i32(tmp);
2482 switch ((insn >> 22) & 3) {
2484 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2487 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2490 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2493 tcg_temp_free_i32(tmp);
2494 gen_op_iwmmxt_movq_wRn_M0(wrd);
2495 gen_op_iwmmxt_set_mup();
2496 gen_op_iwmmxt_set_cup();
2498 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2499 case 0x014: case 0x414: case 0x814: case 0xc14:
2500 if (((insn >> 22) & 3) == 0)
2502 wrd = (insn >> 12) & 0xf;
2503 rd0 = (insn >> 16) & 0xf;
2504 gen_op_iwmmxt_movq_M0_wRn(rd0);
2505 tmp = tcg_temp_new_i32();
2506 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2507 tcg_temp_free_i32(tmp);
2510 switch ((insn >> 22) & 3) {
2512 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2515 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2518 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2521 tcg_temp_free_i32(tmp);
2522 gen_op_iwmmxt_movq_wRn_M0(wrd);
2523 gen_op_iwmmxt_set_mup();
2524 gen_op_iwmmxt_set_cup();
2526 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2527 case 0x114: case 0x514: case 0x914: case 0xd14:
2528 if (((insn >> 22) & 3) == 0)
2530 wrd = (insn >> 12) & 0xf;
2531 rd0 = (insn >> 16) & 0xf;
2532 gen_op_iwmmxt_movq_M0_wRn(rd0);
2533 tmp = tcg_temp_new_i32();
2534 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2535 tcg_temp_free_i32(tmp);
2538 switch ((insn >> 22) & 3) {
2540 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2543 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2546 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2549 tcg_temp_free_i32(tmp);
2550 gen_op_iwmmxt_movq_wRn_M0(wrd);
2551 gen_op_iwmmxt_set_mup();
2552 gen_op_iwmmxt_set_cup();
2554 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2555 case 0x314: case 0x714: case 0xb14: case 0xf14:
2556 if (((insn >> 22) & 3) == 0)
2558 wrd = (insn >> 12) & 0xf;
2559 rd0 = (insn >> 16) & 0xf;
2560 gen_op_iwmmxt_movq_M0_wRn(rd0);
2561 tmp = tcg_temp_new_i32();
2562 switch ((insn >> 22) & 3) {
2564 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2565 tcg_temp_free_i32(tmp);
2568 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2571 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2572 tcg_temp_free_i32(tmp);
2575 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2578 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2579 tcg_temp_free_i32(tmp);
2582 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2585 tcg_temp_free_i32(tmp);
2586 gen_op_iwmmxt_movq_wRn_M0(wrd);
2587 gen_op_iwmmxt_set_mup();
2588 gen_op_iwmmxt_set_cup();
2590 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2591 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2592 wrd = (insn >> 12) & 0xf;
2593 rd0 = (insn >> 16) & 0xf;
2594 rd1 = (insn >> 0) & 0xf;
2595 gen_op_iwmmxt_movq_M0_wRn(rd0);
2596 switch ((insn >> 22) & 3) {
2598 if (insn & (1 << 21))
2599 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2601 gen_op_iwmmxt_minub_M0_wRn(rd1);
2604 if (insn & (1 << 21))
2605 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2607 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2610 if (insn & (1 << 21))
2611 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2613 gen_op_iwmmxt_minul_M0_wRn(rd1);
2618 gen_op_iwmmxt_movq_wRn_M0(wrd);
2619 gen_op_iwmmxt_set_mup();
2621 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2622 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2623 wrd = (insn >> 12) & 0xf;
2624 rd0 = (insn >> 16) & 0xf;
2625 rd1 = (insn >> 0) & 0xf;
2626 gen_op_iwmmxt_movq_M0_wRn(rd0);
2627 switch ((insn >> 22) & 3) {
2629 if (insn & (1 << 21))
2630 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2632 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2635 if (insn & (1 << 21))
2636 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2638 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2641 if (insn & (1 << 21))
2642 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2644 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2649 gen_op_iwmmxt_movq_wRn_M0(wrd);
2650 gen_op_iwmmxt_set_mup();
2652 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2653 case 0x402: case 0x502: case 0x602: case 0x702:
2654 wrd = (insn >> 12) & 0xf;
2655 rd0 = (insn >> 16) & 0xf;
2656 rd1 = (insn >> 0) & 0xf;
2657 gen_op_iwmmxt_movq_M0_wRn(rd0);
2658 tmp = tcg_const_i32((insn >> 20) & 3);
2659 iwmmxt_load_reg(cpu_V1, rd1);
2660 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2661 tcg_temp_free_i32(tmp);
2662 gen_op_iwmmxt_movq_wRn_M0(wrd);
2663 gen_op_iwmmxt_set_mup();
2665 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2666 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2667 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2668 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2669 wrd = (insn >> 12) & 0xf;
2670 rd0 = (insn >> 16) & 0xf;
2671 rd1 = (insn >> 0) & 0xf;
2672 gen_op_iwmmxt_movq_M0_wRn(rd0);
2673 switch ((insn >> 20) & 0xf) {
2675 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2678 gen_op_iwmmxt_subub_M0_wRn(rd1);
2681 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2684 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2687 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2690 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2693 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2696 gen_op_iwmmxt_subul_M0_wRn(rd1);
2699 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2704 gen_op_iwmmxt_movq_wRn_M0(wrd);
2705 gen_op_iwmmxt_set_mup();
2706 gen_op_iwmmxt_set_cup();
2708 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2709 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2710 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2711 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2712 wrd = (insn >> 12) & 0xf;
2713 rd0 = (insn >> 16) & 0xf;
2714 gen_op_iwmmxt_movq_M0_wRn(rd0);
2715 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2716 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2717 tcg_temp_free_i32(tmp);
2718 gen_op_iwmmxt_movq_wRn_M0(wrd);
2719 gen_op_iwmmxt_set_mup();
2720 gen_op_iwmmxt_set_cup();
2722 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2723 case 0x418: case 0x518: case 0x618: case 0x718:
2724 case 0x818: case 0x918: case 0xa18: case 0xb18:
2725 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2726 wrd = (insn >> 12) & 0xf;
2727 rd0 = (insn >> 16) & 0xf;
2728 rd1 = (insn >> 0) & 0xf;
2729 gen_op_iwmmxt_movq_M0_wRn(rd0);
2730 switch ((insn >> 20) & 0xf) {
2732 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2735 gen_op_iwmmxt_addub_M0_wRn(rd1);
2738 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2741 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2744 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2747 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2750 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2753 gen_op_iwmmxt_addul_M0_wRn(rd1);
2756 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2761 gen_op_iwmmxt_movq_wRn_M0(wrd);
2762 gen_op_iwmmxt_set_mup();
2763 gen_op_iwmmxt_set_cup();
2765 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2766 case 0x408: case 0x508: case 0x608: case 0x708:
2767 case 0x808: case 0x908: case 0xa08: case 0xb08:
2768 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2769 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2771 wrd = (insn >> 12) & 0xf;
2772 rd0 = (insn >> 16) & 0xf;
2773 rd1 = (insn >> 0) & 0xf;
2774 gen_op_iwmmxt_movq_M0_wRn(rd0);
2775 switch ((insn >> 22) & 3) {
2777 if (insn & (1 << 21))
2778 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2780 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2783 if (insn & (1 << 21))
2784 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2786 gen_op_iwmmxt_packul_M0_wRn(rd1);
2789 if (insn & (1 << 21))
2790 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2792 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2795 gen_op_iwmmxt_movq_wRn_M0(wrd);
2796 gen_op_iwmmxt_set_mup();
2797 gen_op_iwmmxt_set_cup();
2799 case 0x201: case 0x203: case 0x205: case 0x207:
2800 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2801 case 0x211: case 0x213: case 0x215: case 0x217:
2802 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2803 wrd = (insn >> 5) & 0xf;
2804 rd0 = (insn >> 12) & 0xf;
2805 rd1 = (insn >> 0) & 0xf;
2806 if (rd0 == 0xf || rd1 == 0xf)
2808 gen_op_iwmmxt_movq_M0_wRn(wrd);
2809 tmp = load_reg(s, rd0);
2810 tmp2 = load_reg(s, rd1);
2811 switch ((insn >> 16) & 0xf) {
2812 case 0x0: /* TMIA */
2813 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2815 case 0x8: /* TMIAPH */
2816 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2818 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2819 if (insn & (1 << 16))
2820 tcg_gen_shri_i32(tmp, tmp, 16);
2821 if (insn & (1 << 17))
2822 tcg_gen_shri_i32(tmp2, tmp2, 16);
2823 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2826 tcg_temp_free_i32(tmp2);
2827 tcg_temp_free_i32(tmp);
2830 tcg_temp_free_i32(tmp2);
2831 tcg_temp_free_i32(tmp);
2832 gen_op_iwmmxt_movq_wRn_M0(wrd);
2833 gen_op_iwmmxt_set_mup();
2842 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2843 (i.e. an undefined instruction). */
2844 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2846 int acc, rd0, rd1, rdhi, rdlo;
2849 if ((insn & 0x0ff00f10) == 0x0e200010) {
2850 /* Multiply with Internal Accumulate Format */
2851 rd0 = (insn >> 12) & 0xf;
2853 acc = (insn >> 5) & 7;
2858 tmp = load_reg(s, rd0);
2859 tmp2 = load_reg(s, rd1);
2860 switch ((insn >> 16) & 0xf) {
2862 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2864 case 0x8: /* MIAPH */
2865 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2867 case 0xc: /* MIABB */
2868 case 0xd: /* MIABT */
2869 case 0xe: /* MIATB */
2870 case 0xf: /* MIATT */
2871 if (insn & (1 << 16))
2872 tcg_gen_shri_i32(tmp, tmp, 16);
2873 if (insn & (1 << 17))
2874 tcg_gen_shri_i32(tmp2, tmp2, 16);
2875 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2880 tcg_temp_free_i32(tmp2);
2881 tcg_temp_free_i32(tmp);
2883 gen_op_iwmmxt_movq_wRn_M0(acc);
2887 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2888 /* Internal Accumulator Access Format */
2889 rdhi = (insn >> 16) & 0xf;
2890 rdlo = (insn >> 12) & 0xf;
2896 if (insn & ARM_CP_RW_BIT) { /* MRA */
2897 iwmmxt_load_reg(cpu_V0, acc);
2898 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2899 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2900 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
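/* The XScale internal accumulator is 40 bits wide, so only
 * bits [39:32] are kept in rdhi.
 */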
2901 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2903 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2904 iwmmxt_store_reg(cpu_V0, acc);
2912 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2913 #define VFP_SREG(insn, bigbit, smallbit) \
2914 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2915 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2916 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2917 reg = (((insn) >> (bigbit)) & 0x0f) \
2918 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2920 if (insn & (1 << (smallbit))) \
2922 reg = ((insn) >> (bigbit)) & 0x0f; \
2925 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2926 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2927 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2928 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2929 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2930 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
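/* For example, VFP_SREG_D(insn) expands to
 * ((insn >> 11) & 0x1e) | ((insn >> 22) & 1): the 4-bit field supplies
 * the upper bits of the 5-bit S register number and the single extra bit
 * supplies bit 0.  For D registers on VFP3+ the same extra bit instead
 * becomes bit 4 (the top bit) of the register number.
 */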
2932 /* Move between integer and VFP cores. */
2933 static TCGv_i32 gen_vfp_mrs(void)
2935 TCGv_i32 tmp = tcg_temp_new_i32();
2936 tcg_gen_mov_i32(tmp, cpu_F0s);
2940 static void gen_vfp_msr(TCGv_i32 tmp)
2942 tcg_gen_mov_i32(cpu_F0s, tmp);
2943 tcg_temp_free_i32(tmp);
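/* The gen_neon_dup_* helpers below replicate a byte or halfword of 'var'
 * across the whole 32-bit value, as used for Neon element duplication.
 */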
2946 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2948 TCGv_i32 tmp = tcg_temp_new_i32();
2950 tcg_gen_shri_i32(var, var, shift);
2951 tcg_gen_ext8u_i32(var, var);
2952 tcg_gen_shli_i32(tmp, var, 8);
2953 tcg_gen_or_i32(var, var, tmp);
2954 tcg_gen_shli_i32(tmp, var, 16);
2955 tcg_gen_or_i32(var, var, tmp);
2956 tcg_temp_free_i32(tmp);
2959 static void gen_neon_dup_low16(TCGv_i32 var)
2961 TCGv_i32 tmp = tcg_temp_new_i32();
2962 tcg_gen_ext16u_i32(var, var);
2963 tcg_gen_shli_i32(tmp, var, 16);
2964 tcg_gen_or_i32(var, var, tmp);
2965 tcg_temp_free_i32(tmp);
2968 static void gen_neon_dup_high16(TCGv_i32 var)
2970 TCGv_i32 tmp = tcg_temp_new_i32();
2971 tcg_gen_andi_i32(var, var, 0xffff0000);
2972 tcg_gen_shri_i32(tmp, var, 16);
2973 tcg_gen_or_i32(var, var, tmp);
2974 tcg_temp_free_i32(tmp);
2977 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2979 /* Load a single Neon element and replicate into a 32-bit TCG reg */
2980 TCGv_i32 tmp = tcg_temp_new_i32();
2983 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2984 gen_neon_dup_u8(tmp, 0);
2987 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2988 gen_neon_dup_low16(tmp);
2991 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2993 default: /* Avoid compiler warnings. */
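/* VSEL: the cc field (insn bits [21:20]) encodes eq/vs/ge/gt; dest gets
 * frn when the condition holds and frm otherwise, implemented with
 * movcond on the cached ZF/NF/VF flag values.
 */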
2999 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
3002 uint32_t cc = extract32(insn, 20, 2);
3005 TCGv_i64 frn, frm, dest;
3006 TCGv_i64 tmp, zero, zf, nf, vf;
3008 zero = tcg_const_i64(0);
3010 frn = tcg_temp_new_i64();
3011 frm = tcg_temp_new_i64();
3012 dest = tcg_temp_new_i64();
3014 zf = tcg_temp_new_i64();
3015 nf = tcg_temp_new_i64();
3016 vf = tcg_temp_new_i64();
3018 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3019 tcg_gen_ext_i32_i64(nf, cpu_NF);
3020 tcg_gen_ext_i32_i64(vf, cpu_VF);
3022 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3023 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3026 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3030 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3033 case 2: /* ge: N == V -> N ^ V == 0 */
3034 tmp = tcg_temp_new_i64();
3035 tcg_gen_xor_i64(tmp, vf, nf);
3036 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3038 tcg_temp_free_i64(tmp);
3040 case 3: /* gt: !Z && N == V */
3041 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3043 tmp = tcg_temp_new_i64();
3044 tcg_gen_xor_i64(tmp, vf, nf);
3045 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3047 tcg_temp_free_i64(tmp);
3050 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3051 tcg_temp_free_i64(frn);
3052 tcg_temp_free_i64(frm);
3053 tcg_temp_free_i64(dest);
3055 tcg_temp_free_i64(zf);
3056 tcg_temp_free_i64(nf);
3057 tcg_temp_free_i64(vf);
3059 tcg_temp_free_i64(zero);
3061 TCGv_i32 frn, frm, dest;
3064 zero = tcg_const_i32(0);
3066 frn = tcg_temp_new_i32();
3067 frm = tcg_temp_new_i32();
3068 dest = tcg_temp_new_i32();
3069 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3070 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3073 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3077 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3080 case 2: /* ge: N == V -> N ^ V == 0 */
3081 tmp = tcg_temp_new_i32();
3082 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3083 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3085 tcg_temp_free_i32(tmp);
3087 case 3: /* gt: !Z && N == V */
3088 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3090 tmp = tcg_temp_new_i32();
3091 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3092 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3094 tcg_temp_free_i32(tmp);
3097 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3098 tcg_temp_free_i32(frn);
3099 tcg_temp_free_i32(frm);
3100 tcg_temp_free_i32(dest);
3102 tcg_temp_free_i32(zero);
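/* VMINNM/VMAXNM: bit 6 of the insn selects min vs max; these use the
 * IEEE 754-2008 minNum/maxNum semantics via the vfp_minnum/maxnum
 * helpers.
 */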
3108 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3109 uint32_t rm, uint32_t dp)
3111 uint32_t vmin = extract32(insn, 6, 1);
3112 TCGv_ptr fpst = get_fpstatus_ptr(0);
3115 TCGv_i64 frn, frm, dest;
3117 frn = tcg_temp_new_i64();
3118 frm = tcg_temp_new_i64();
3119 dest = tcg_temp_new_i64();
3121 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3122 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3124 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
3126 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
3128 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3129 tcg_temp_free_i64(frn);
3130 tcg_temp_free_i64(frm);
3131 tcg_temp_free_i64(dest);
3133 TCGv_i32 frn, frm, dest;
3135 frn = tcg_temp_new_i32();
3136 frm = tcg_temp_new_i32();
3137 dest = tcg_temp_new_i32();
3139 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3140 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3142 gen_helper_vfp_minnums(dest, frn, frm, fpst);
3144 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
3146 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3147 tcg_temp_free_i32(frn);
3148 tcg_temp_free_i32(frm);
3149 tcg_temp_free_i32(dest);
3152 tcg_temp_free_ptr(fpst);
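/* VRINTA/VRINTN/VRINTP/VRINTM: install the rounding mode from the insn
 * with set_rmode (which hands back the previous mode), run the
 * round-to-integral helper, then restore the original mode.
 */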
3156 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3159 TCGv_ptr fpst = get_fpstatus_ptr(0);
3162 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3163 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3168 tcg_op = tcg_temp_new_i64();
3169 tcg_res = tcg_temp_new_i64();
3170 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3171 gen_helper_rintd(tcg_res, tcg_op, fpst);
3172 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3173 tcg_temp_free_i64(tcg_op);
3174 tcg_temp_free_i64(tcg_res);
3178 tcg_op = tcg_temp_new_i32();
3179 tcg_res = tcg_temp_new_i32();
3180 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3181 gen_helper_rints(tcg_res, tcg_op, fpst);
3182 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3183 tcg_temp_free_i32(tcg_op);
3184 tcg_temp_free_i32(tcg_res);
3187 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3188 tcg_temp_free_i32(tcg_rmode);
3190 tcg_temp_free_ptr(fpst);
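/* VCVTA/VCVTN/VCVTP/VCVTM: float to integer conversion using the rounding
 * mode encoded in the insn rather than the one in FPSCR; the result is
 * always written to a single precision register.
 */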
3194 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3197 bool is_signed = extract32(insn, 7, 1);
3198 TCGv_ptr fpst = get_fpstatus_ptr(0);
3199 TCGv_i32 tcg_rmode, tcg_shift;
3201 tcg_shift = tcg_const_i32(0);
3203 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3204 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3207 TCGv_i64 tcg_double, tcg_res;
3209 /* Rd is encoded as a single precision register even when the source
3210 * is double precision.
3212 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3213 tcg_double = tcg_temp_new_i64();
3214 tcg_res = tcg_temp_new_i64();
3215 tcg_tmp = tcg_temp_new_i32();
3216 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3218 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3220 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3222 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3223 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3224 tcg_temp_free_i32(tcg_tmp);
3225 tcg_temp_free_i64(tcg_res);
3226 tcg_temp_free_i64(tcg_double);
3228 TCGv_i32 tcg_single, tcg_res;
3229 tcg_single = tcg_temp_new_i32();
3230 tcg_res = tcg_temp_new_i32();
3231 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3233 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3235 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3237 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3238 tcg_temp_free_i32(tcg_res);
3239 tcg_temp_free_i32(tcg_single);
3242 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3243 tcg_temp_free_i32(tcg_rmode);
3245 tcg_temp_free_i32(tcg_shift);
3247 tcg_temp_free_ptr(fpst);
3252 /* Table for converting the most common AArch32 encoding of
3253 * rounding mode to arm_fprounding order (which matches the
3254 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3256 static const uint8_t fp_decode_rm[] = {
3263 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3265 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3267 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3272 VFP_DREG_D(rd, insn);
3273 VFP_DREG_N(rn, insn);
3274 VFP_DREG_M(rm, insn);
3276 rd = VFP_SREG_D(insn);
3277 rn = VFP_SREG_N(insn);
3278 rm = VFP_SREG_M(insn);
3281 if ((insn & 0x0f800e50) == 0x0e000a00) {
3282 return handle_vsel(insn, rd, rn, rm, dp);
3283 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3284 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3285 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3286 /* VRINTA, VRINTN, VRINTP, VRINTM */
3287 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3288 return handle_vrint(insn, rd, rm, dp, rounding);
3289 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3290 /* VCVTA, VCVTN, VCVTP, VCVTM */
3291 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3292 return handle_vcvt(insn, rd, rm, dp, rounding);
3297 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3298 (i.e. an undefined instruction). */
3299 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3301 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3307 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3311 /* FIXME: this access check should not take precedence over UNDEF
3312 * for invalid encodings; we will generate incorrect syndrome information
3313 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3315 if (s->fp_excp_el) {
3316 gen_exception_insn(s, 4, EXCP_UDEF,
3317 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3321 if (!s->vfp_enabled) {
3322 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3323 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3325 rn = (insn >> 16) & 0xf;
3326 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3327 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3332 if (extract32(insn, 28, 4) == 0xf) {
3333 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3334 * only used in v8 and above.
3336 return disas_vfp_v8_insn(s, insn);
3339 dp = ((insn & 0xf00) == 0xb00);
3340 switch ((insn >> 24) & 0xf) {
3342 if (insn & (1 << 4)) {
3343 /* single register transfer */
3344 rd = (insn >> 12) & 0xf;
3349 VFP_DREG_N(rn, insn);
3352 if (insn & 0x00c00060
3353 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3357 pass = (insn >> 21) & 1;
3358 if (insn & (1 << 22)) {
3360 offset = ((insn >> 5) & 3) * 8;
3361 } else if (insn & (1 << 5)) {
3363 offset = (insn & (1 << 6)) ? 16 : 0;
3368 if (insn & ARM_CP_RW_BIT) {
3370 tmp = neon_load_reg(rn, pass);
3374 tcg_gen_shri_i32(tmp, tmp, offset);
3375 if (insn & (1 << 23))
3381 if (insn & (1 << 23)) {
3383 tcg_gen_shri_i32(tmp, tmp, 16);
3389 tcg_gen_sari_i32(tmp, tmp, 16);
3398 store_reg(s, rd, tmp);
3401 tmp = load_reg(s, rd);
3402 if (insn & (1 << 23)) {
3405 gen_neon_dup_u8(tmp, 0);
3406 } else if (size == 1) {
3407 gen_neon_dup_low16(tmp);
3409 for (n = 0; n <= pass * 2; n++) {
3410 tmp2 = tcg_temp_new_i32();
3411 tcg_gen_mov_i32(tmp2, tmp);
3412 neon_store_reg(rn, n, tmp2);
3414 neon_store_reg(rn, n, tmp);
3419 tmp2 = neon_load_reg(rn, pass);
3420 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3421 tcg_temp_free_i32(tmp2);
3424 tmp2 = neon_load_reg(rn, pass);
3425 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3426 tcg_temp_free_i32(tmp2);
3431 neon_store_reg(rn, pass, tmp);
3435 if ((insn & 0x6f) != 0x00)
3437 rn = VFP_SREG_N(insn);
3438 if (insn & ARM_CP_RW_BIT) {
3440 if (insn & (1 << 21)) {
3441 /* system register */
3446 /* VFP2 allows access to FSID from userspace.
3447 VFP3 restricts all id registers to privileged
3450 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3453 tmp = load_cpu_field(vfp.xregs[rn]);
3458 tmp = load_cpu_field(vfp.xregs[rn]);
3460 case ARM_VFP_FPINST:
3461 case ARM_VFP_FPINST2:
3462 /* Not present in VFP3. */
3464 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3467 tmp = load_cpu_field(vfp.xregs[rn]);
3471 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3472 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3474 tmp = tcg_temp_new_i32();
3475 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3479 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3486 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3489 tmp = load_cpu_field(vfp.xregs[rn]);
3495 gen_mov_F0_vreg(0, rn);
3496 tmp = gen_vfp_mrs();
3499 /* Set the 4 flag bits in the CPSR. */
3501 tcg_temp_free_i32(tmp);
3503 store_reg(s, rd, tmp);
3507 if (insn & (1 << 21)) {
3509 /* system register */
3514 /* Writes are ignored. */
3517 tmp = load_reg(s, rd);
3518 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3519 tcg_temp_free_i32(tmp);
3525 /* TODO: VFP subarchitecture support.
3526 * For now, keep the EN bit only */
3527 tmp = load_reg(s, rd);
3528 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3529 store_cpu_field(tmp, vfp.xregs[rn]);
3532 case ARM_VFP_FPINST:
3533 case ARM_VFP_FPINST2:
3537 tmp = load_reg(s, rd);
3538 store_cpu_field(tmp, vfp.xregs[rn]);
3544 tmp = load_reg(s, rd);
3546 gen_mov_vreg_F0(0, rn);
3551 /* data processing */
3552 /* The opcode is in bits 23, 21, 20 and 6. */
3553 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
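/* i.e. op[3] = insn[23], op[2:1] = insn[21:20], op[0] = insn[6]. */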
3557 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3559 /* rn is register number */
3560 VFP_DREG_N(rn, insn);
3563 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3564 ((rn & 0x1e) == 0x6))) {
3565 /* Integer or single/half precision destination. */
3566 rd = VFP_SREG_D(insn);
3568 VFP_DREG_D(rd, insn);
3571 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3572 ((rn & 0x1e) == 0x4))) {
3573 /* VCVT from int or half precision is always from S reg
3574 * regardless of dp bit. VCVT with immediate frac_bits
3575 * has same format as SREG_M.
3577 rm = VFP_SREG_M(insn);
3579 VFP_DREG_M(rm, insn);
3582 rn = VFP_SREG_N(insn);
3583 if (op == 15 && rn == 15) {
3584 /* Double precision destination. */
3585 VFP_DREG_D(rd, insn);
3587 rd = VFP_SREG_D(insn);
3589 /* NB that we implicitly rely on the encoding for the frac_bits
3590 * in VCVT of fixed to float being the same as that of an SREG_M
3592 rm = VFP_SREG_M(insn);
3595 veclen = s->vec_len;
3596 if (op == 15 && rn > 3)
3599 /* Shut up compiler warnings. */
3610 /* Figure out what type of vector operation this is. */
3611 if ((rd & bank_mask) == 0) {
3616 delta_d = (s->vec_stride >> 1) + 1;
3618 delta_d = s->vec_stride + 1;
3620 if ((rm & bank_mask) == 0) {
3621 /* mixed scalar/vector */
3630 /* Load the initial operands. */
3635 /* Integer source */
3636 gen_mov_F0_vreg(0, rm);
3641 gen_mov_F0_vreg(dp, rd);
3642 gen_mov_F1_vreg(dp, rm);
3646 /* Compare with zero */
3647 gen_mov_F0_vreg(dp, rd);
3658 /* Source and destination the same. */
3659 gen_mov_F0_vreg(dp, rd);
3665 /* VCVTB, VCVTT: only present with the halfprec extension
3666 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3667 * (we choose to UNDEF)
3669 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3670 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3673 if (!extract32(rn, 1, 1)) {
3674 /* Half precision source. */
3675 gen_mov_F0_vreg(0, rm);
3678 /* Otherwise fall through */
3680 /* One source operand. */
3681 gen_mov_F0_vreg(dp, rm);
3685 /* Two source operands. */
3686 gen_mov_F0_vreg(dp, rn);
3687 gen_mov_F1_vreg(dp, rm);
3691 /* Perform the calculation. */
3693 case 0: /* VMLA: fd + (fn * fm) */
3694 /* Note that order of inputs to the add matters for NaNs */
3696 gen_mov_F0_vreg(dp, rd);
3699 case 1: /* VMLS: fd + -(fn * fm) */
3702 gen_mov_F0_vreg(dp, rd);
3705 case 2: /* VNMLS: -fd + (fn * fm) */
3706 /* Note that it isn't valid to replace (-A + B) with (B - A)
3707 * or similar plausible looking simplifications
3708 * because this will give wrong results for NaNs.
3711 gen_mov_F0_vreg(dp, rd);
3715 case 3: /* VNMLA: -fd + -(fn * fm) */
3718 gen_mov_F0_vreg(dp, rd);
3722 case 4: /* mul: fn * fm */
3725 case 5: /* nmul: -(fn * fm) */
3729 case 6: /* add: fn + fm */
3732 case 7: /* sub: fn - fm */
3735 case 8: /* div: fn / fm */
3738 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3739 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3740 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3741 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3742 /* These are fused multiply-add, and must be done as one
3743 * floating point operation with no rounding between the
3744 * multiplication and addition steps.
3745 * NB that doing the negations here as separate steps is
3746 * correct: an input NaN should come out with its sign bit
3747 * flipped if it is a negated input.
3749 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3757 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3759 frd = tcg_temp_new_i64();
3760 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3763 gen_helper_vfp_negd(frd, frd);
3765 fpst = get_fpstatus_ptr(0);
3766 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3767 cpu_F1d, frd, fpst);
3768 tcg_temp_free_ptr(fpst);
3769 tcg_temp_free_i64(frd);
3775 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3777 frd = tcg_temp_new_i32();
3778 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3780 gen_helper_vfp_negs(frd, frd);
3782 fpst = get_fpstatus_ptr(0);
3783 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3784 cpu_F1s, frd, fpst);
3785 tcg_temp_free_ptr(fpst);
3786 tcg_temp_free_i32(frd);
3789 case 14: /* fconst */
3790 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3794 n = (insn << 12) & 0x80000000;
3795 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3802 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3809 tcg_gen_movi_i32(cpu_F0s, n);
3812 case 15: /* extension space */
3826 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3828 TCGv_ptr fpst = get_fpstatus_ptr(false);
3829 TCGv_i32 ahp_mode = get_ahp_flag();
3830 tmp = gen_vfp_mrs();
3831 tcg_gen_ext16u_i32(tmp, tmp);
3833 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3836 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3839 tcg_temp_free_i32(ahp_mode);
3840 tcg_temp_free_ptr(fpst);
3841 tcg_temp_free_i32(tmp);
3844 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3846 TCGv_ptr fpst = get_fpstatus_ptr(false);
3847 TCGv_i32 ahp = get_ahp_flag();
3848 tmp = gen_vfp_mrs();
3849 tcg_gen_shri_i32(tmp, tmp, 16);
3851 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3854 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3857 tcg_temp_free_i32(tmp);
3858 tcg_temp_free_i32(ahp);
3859 tcg_temp_free_ptr(fpst);
3862 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3864 TCGv_ptr fpst = get_fpstatus_ptr(false);
3865 TCGv_i32 ahp = get_ahp_flag();
3866 tmp = tcg_temp_new_i32();
3869 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3872 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3875 tcg_temp_free_i32(ahp);
3876 tcg_temp_free_ptr(fpst);
3877 gen_mov_F0_vreg(0, rd);
3878 tmp2 = gen_vfp_mrs();
3879 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3880 tcg_gen_or_i32(tmp, tmp, tmp2);
3881 tcg_temp_free_i32(tmp2);
3885 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3887 TCGv_ptr fpst = get_fpstatus_ptr(false);
3888 TCGv_i32 ahp = get_ahp_flag();
3889 tmp = tcg_temp_new_i32();
3891 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3894 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3897 tcg_temp_free_i32(ahp);
3898 tcg_temp_free_ptr(fpst);
3899 tcg_gen_shli_i32(tmp, tmp, 16);
3900 gen_mov_F0_vreg(0, rd);
3901 tmp2 = gen_vfp_mrs();
3902 tcg_gen_ext16u_i32(tmp2, tmp2);
3903 tcg_gen_or_i32(tmp, tmp, tmp2);
3904 tcg_temp_free_i32(tmp2);
3917 case 11: /* cmpez */
3921 case 12: /* vrintr */
3923 TCGv_ptr fpst = get_fpstatus_ptr(0);
3925 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3927 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3929 tcg_temp_free_ptr(fpst);
3932 case 13: /* vrintz */
3934 TCGv_ptr fpst = get_fpstatus_ptr(0);
3936 tcg_rmode = tcg_const_i32(float_round_to_zero);
3937 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3939 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3941 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3943 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3944 tcg_temp_free_i32(tcg_rmode);
3945 tcg_temp_free_ptr(fpst);
3948 case 14: /* vrintx */
3950 TCGv_ptr fpst = get_fpstatus_ptr(0);
3952 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3954 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3956 tcg_temp_free_ptr(fpst);
3959 case 15: /* single<->double conversion */
3961 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3963 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3965 case 16: /* fuito */
3966 gen_vfp_uito(dp, 0);
3968 case 17: /* fsito */
3969 gen_vfp_sito(dp, 0);
3971 case 20: /* fshto */
3972 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3975 gen_vfp_shto(dp, 16 - rm, 0);
3977 case 21: /* fslto */
3978 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3981 gen_vfp_slto(dp, 32 - rm, 0);
3983 case 22: /* fuhto */
3984 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3987 gen_vfp_uhto(dp, 16 - rm, 0);
3989 case 23: /* fulto */
3990 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3993 gen_vfp_ulto(dp, 32 - rm, 0);
3995 case 24: /* ftoui */
3996 gen_vfp_toui(dp, 0);
3998 case 25: /* ftouiz */
3999 gen_vfp_touiz(dp, 0);
4001 case 26: /* ftosi */
4002 gen_vfp_tosi(dp, 0);
4004 case 27: /* ftosiz */
4005 gen_vfp_tosiz(dp, 0);
4007 case 28: /* ftosh */
4008 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4011 gen_vfp_tosh(dp, 16 - rm, 0);
4013 case 29: /* ftosl */
4014 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4017 gen_vfp_tosl(dp, 32 - rm, 0);
4019 case 30: /* ftouh */
4020 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4023 gen_vfp_touh(dp, 16 - rm, 0);
4025 case 31: /* ftoul */
4026 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4029 gen_vfp_toul(dp, 32 - rm, 0);
4031 default: /* undefined */
4035 default: /* undefined */
4039 /* Write back the result. */
4040 if (op == 15 && (rn >= 8 && rn <= 11)) {
4041 /* Comparison, do nothing. */
4042 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4043 (rn & 0x1e) == 0x6)) {
4044 /* VCVT double to int: always integer result.
4045 * VCVT double to half precision is always a single
4048 gen_mov_vreg_F0(0, rd);
4049 } else if (op == 15 && rn == 15) {
4051 gen_mov_vreg_F0(!dp, rd);
4053 gen_mov_vreg_F0(dp, rd);
4056 /* break out of the loop if we have finished */
4060 if (op == 15 && delta_m == 0) {
4061 /* single source one-many */
4063 rd = ((rd + delta_d) & (bank_mask - 1))
4065 gen_mov_vreg_F0(dp, rd);
4069 /* Setup the next operands. */
4071 rd = ((rd + delta_d) & (bank_mask - 1))
4075 /* One source operand. */
4076 rm = ((rm + delta_m) & (bank_mask - 1))
4078 gen_mov_F0_vreg(dp, rm);
4080 /* Two source operands. */
4081 rn = ((rn + delta_d) & (bank_mask - 1))
4083 gen_mov_F0_vreg(dp, rn);
4085 rm = ((rm + delta_m) & (bank_mask - 1))
4087 gen_mov_F1_vreg(dp, rm);
4095 if ((insn & 0x03e00000) == 0x00400000) {
4096 /* two-register transfer */
4097 rn = (insn >> 16) & 0xf;
4098 rd = (insn >> 12) & 0xf;
4100 VFP_DREG_M(rm, insn);
4102 rm = VFP_SREG_M(insn);
4105 if (insn & ARM_CP_RW_BIT) {
4108 gen_mov_F0_vreg(0, rm * 2);
4109 tmp = gen_vfp_mrs();
4110 store_reg(s, rd, tmp);
4111 gen_mov_F0_vreg(0, rm * 2 + 1);
4112 tmp = gen_vfp_mrs();
4113 store_reg(s, rn, tmp);
4115 gen_mov_F0_vreg(0, rm);
4116 tmp = gen_vfp_mrs();
4117 store_reg(s, rd, tmp);
4118 gen_mov_F0_vreg(0, rm + 1);
4119 tmp = gen_vfp_mrs();
4120 store_reg(s, rn, tmp);
4125 tmp = load_reg(s, rd);
4127 gen_mov_vreg_F0(0, rm * 2);
4128 tmp = load_reg(s, rn);
4130 gen_mov_vreg_F0(0, rm * 2 + 1);
4132 tmp = load_reg(s, rd);
4134 gen_mov_vreg_F0(0, rm);
4135 tmp = load_reg(s, rn);
4137 gen_mov_vreg_F0(0, rm + 1);
4142 rn = (insn >> 16) & 0xf;
4144 VFP_DREG_D(rd, insn);
4146 rd = VFP_SREG_D(insn);
4147 if ((insn & 0x01200000) == 0x01000000) {
4148 /* Single load/store */
4149 offset = (insn & 0xff) << 2;
4150 if ((insn & (1 << 23)) == 0)
4152 if (s->thumb && rn == 15) {
4153 /* This is actually UNPREDICTABLE */
4154 addr = tcg_temp_new_i32();
4155 tcg_gen_movi_i32(addr, s->pc & ~2);
4157 addr = load_reg(s, rn);
4159 tcg_gen_addi_i32(addr, addr, offset);
4160 if (insn & (1 << 20)) {
4161 gen_vfp_ld(s, dp, addr);
4162 gen_mov_vreg_F0(dp, rd);
4164 gen_mov_F0_vreg(dp, rd);
4165 gen_vfp_st(s, dp, addr);
4167 tcg_temp_free_i32(addr);
4169 /* load/store multiple */
4170 int w = insn & (1 << 21);
4172 n = (insn >> 1) & 0x7f;
4176 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4177 /* P == U , W == 1 => UNDEF */
4180 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4181 /* UNPREDICTABLE cases for bad immediates: we choose to
4182 * UNDEF to avoid generating huge numbers of TCG ops
4186 if (rn == 15 && w) {
4187 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4191 if (s->thumb && rn == 15) {
4192 /* This is actually UNPREDICTABLE */
4193 addr = tcg_temp_new_i32();
4194 tcg_gen_movi_i32(addr, s->pc & ~2);
4196 addr = load_reg(s, rn);
4198 if (insn & (1 << 24)) /* pre-decrement */
4199 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4205 for (i = 0; i < n; i++) {
4206 if (insn & ARM_CP_RW_BIT) {
4208 gen_vfp_ld(s, dp, addr);
4209 gen_mov_vreg_F0(dp, rd + i);
4212 gen_mov_F0_vreg(dp, rd + i);
4213 gen_vfp_st(s, dp, addr);
4215 tcg_gen_addi_i32(addr, addr, offset);
4219 if (insn & (1 << 24))
4220 offset = -offset * n;
4221 else if (dp && (insn & 1))
4227 tcg_gen_addi_i32(addr, addr, offset);
4228 store_reg(s, rn, addr);
4230 tcg_temp_free_i32(addr);
4236 /* Should never happen. */
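/* Direct block chaining (goto_tb) is only used when the destination lies
 * in the same guest page as the start of this TB or as the current insn;
 * cross-page jumps fall back to a run-time TB lookup instead.
 */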
4242 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4244 #ifndef CONFIG_USER_ONLY
4245 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4246 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4252 static void gen_goto_ptr(void)
4254 tcg_gen_lookup_and_goto_ptr();
4257 /* This will end the TB but doesn't guarantee we'll return to
4258 * cpu_loop_exec. Any live exit_requests will be processed as we
4259 * enter the next TB.
4261 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4263 if (use_goto_tb(s, dest)) {
4265 gen_set_pc_im(s, dest);
4266 tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
4268 gen_set_pc_im(s, dest);
4271 s->base.is_jmp = DISAS_NORETURN;
4274 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4276 if (unlikely(is_singlestepping(s))) {
4277 /* An indirect jump so that we still trigger the debug exception. */
4282 gen_goto_tb(s, 0, dest);
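/* 16x16->32 signed multiply used by SMULxy/SMLAxy: x and y select the
 * top or bottom halfword of each operand before the multiply.
 */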
4286 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4289 tcg_gen_sari_i32(t0, t0, 16);
4293 tcg_gen_sari_i32(t1, t1, 16);
4296 tcg_gen_mul_i32(t0, t0, t1);
4299 /* Return the mask of PSR bits set by an MSR instruction. */
4300 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4305 if (flags & (1 << 0))
4307 if (flags & (1 << 1))
4309 if (flags & (1 << 2))
4311 if (flags & (1 << 3))
4314 /* Mask out undefined bits. */
4315 mask &= ~CPSR_RESERVED;
4316 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4319 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4320 mask &= ~CPSR_Q; /* V5TE in reality */
4322 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4323 mask &= ~(CPSR_E | CPSR_GE);
4325 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4328 /* Mask out execution state and reserved bits. */
4330 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4332 /* Mask out privileged bits. */
4338 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4339 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4343 /* ??? This is also undefined in system mode. */
4347 tmp = load_cpu_field(spsr);
4348 tcg_gen_andi_i32(tmp, tmp, ~mask);
4349 tcg_gen_andi_i32(t0, t0, mask);
4350 tcg_gen_or_i32(tmp, tmp, t0);
4351 store_cpu_field(tmp, spsr);
4353 gen_set_cpsr(t0, mask);
4355 tcg_temp_free_i32(t0);
4360 /* Returns nonzero if access to the PSR is not permitted. */
4361 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4364 tmp = tcg_temp_new_i32();
4365 tcg_gen_movi_i32(tmp, val);
4366 return gen_set_psr(s, mask, spsr, tmp);
4369 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4370 int *tgtmode, int *regno)
4372 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4373 * the target mode and register number, and identify the various
4374 * unpredictable cases.
4375 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4376 * + executed in user mode
4377 * + using R15 as the src/dest register
4378 * + accessing an unimplemented register
4379 * + accessing a register that's inaccessible at current PL/security state*
4380 * + accessing a register that you could access with a different insn
4381 * We choose to UNDEF in all these cases.
4382 * Since we don't know which of the various AArch32 modes we are in
4383 * we have to defer some checks to runtime.
4384 * Accesses to Monitor mode registers from Secure EL1 (which implies
4385 * that EL3 is AArch64) must trap to EL3.
4387 * If the access checks fail this function will emit code to take
4388 * an exception and return false. Otherwise it will return true,
4389 * and set *tgtmode and *regno appropriately.
4391 int exc_target = default_exception_el(s);
4393 /* These instructions are present only in ARMv8, or in ARMv7 with the
4394 * Virtualization Extensions.
4396 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4397 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4401 if (IS_USER(s) || rn == 15) {
4405 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4406 * of registers into (r, sysm).
4409 /* SPSRs for other modes */
4411 case 0xe: /* SPSR_fiq */
4412 *tgtmode = ARM_CPU_MODE_FIQ;
4414 case 0x10: /* SPSR_irq */
4415 *tgtmode = ARM_CPU_MODE_IRQ;
4417 case 0x12: /* SPSR_svc */
4418 *tgtmode = ARM_CPU_MODE_SVC;
4420 case 0x14: /* SPSR_abt */
4421 *tgtmode = ARM_CPU_MODE_ABT;
4423 case 0x16: /* SPSR_und */
4424 *tgtmode = ARM_CPU_MODE_UND;
4426 case 0x1c: /* SPSR_mon */
4427 *tgtmode = ARM_CPU_MODE_MON;
4429 case 0x1e: /* SPSR_hyp */
4430 *tgtmode = ARM_CPU_MODE_HYP;
4432 default: /* unallocated */
4435 /* We arbitrarily assign SPSR a register number of 16. */
4438 /* general purpose registers for other modes */
4440 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4441 *tgtmode = ARM_CPU_MODE_USR;
4444 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4445 *tgtmode = ARM_CPU_MODE_FIQ;
4448 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4449 *tgtmode = ARM_CPU_MODE_IRQ;
4450 *regno = sysm & 1 ? 13 : 14;
4452 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4453 *tgtmode = ARM_CPU_MODE_SVC;
4454 *regno = sysm & 1 ? 13 : 14;
4456 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4457 *tgtmode = ARM_CPU_MODE_ABT;
4458 *regno = sysm & 1 ? 13 : 14;
4460 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4461 *tgtmode = ARM_CPU_MODE_UND;
4462 *regno = sysm & 1 ? 13 : 14;
4464 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4465 *tgtmode = ARM_CPU_MODE_MON;
4466 *regno = sysm & 1 ? 13 : 14;
4468 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4469 *tgtmode = ARM_CPU_MODE_HYP;
4470 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4471 *regno = sysm & 1 ? 13 : 17;
4473 default: /* unallocated */
4478 /* Catch the 'accessing inaccessible register' cases we can detect
4479 * at translate time.
4482 case ARM_CPU_MODE_MON:
4483 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4486 if (s->current_el == 1) {
4487 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4488 * then accesses to Mon registers trap to EL3
4494 case ARM_CPU_MODE_HYP:
4495 /* Note that we can forbid accesses from EL2 here because they
4496 * must be from Hyp mode itself
4498 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4509 /* If we get here then some access check did not pass */
4510 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4514 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4516 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4517 int tgtmode = 0, regno = 0;
4519 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4523 /* Sync state because msr_banked() can raise exceptions */
4524 gen_set_condexec(s);
4525 gen_set_pc_im(s, s->pc - 4);
4526 tcg_reg = load_reg(s, rn);
4527 tcg_tgtmode = tcg_const_i32(tgtmode);
4528 tcg_regno = tcg_const_i32(regno);
4529 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4530 tcg_temp_free_i32(tcg_tgtmode);
4531 tcg_temp_free_i32(tcg_regno);
4532 tcg_temp_free_i32(tcg_reg);
4533 s->base.is_jmp = DISAS_UPDATE;
4536 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4538 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4539 int tgtmode = 0, regno = 0;
4541 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4545 /* Sync state because mrs_banked() can raise exceptions */
4546 gen_set_condexec(s);
4547 gen_set_pc_im(s, s->pc - 4);
4548 tcg_reg = tcg_temp_new_i32();
4549 tcg_tgtmode = tcg_const_i32(tgtmode);
4550 tcg_regno = tcg_const_i32(regno);
4551 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4552 tcg_temp_free_i32(tcg_tgtmode);
4553 tcg_temp_free_i32(tcg_regno);
4554 store_reg(s, rn, tcg_reg);
4555 s->base.is_jmp = DISAS_UPDATE;
4558 /* Store value to PC as for an exception return (i.e. don't
4559 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4560 * will do the masking based on the new value of the Thumb bit.
4562 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
4564 tcg_gen_mov_i32(cpu_R[15], pc);
4565 tcg_temp_free_i32(pc);
4568 /* Generate a v6 exception return. Marks both values as dead. */
4569 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4571 store_pc_exc_ret(s, pc);
4572 /* The cpsr_write_eret helper will mask the low bits of PC
4573 * appropriately depending on the new Thumb bit, so it must
4574 * be called after storing the new PC.
4576 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4579 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4580 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4583 tcg_temp_free_i32(cpsr);
4584 /* Must exit loop to check un-masked IRQs */
4585 s->base.is_jmp = DISAS_EXIT;
4588 /* Generate an old-style exception return. Marks pc as dead. */
4589 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4591 gen_rfe(s, pc, load_cpu_field(spsr));
4595 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4596 * only call the helper when running single threaded TCG code to ensure
4597 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4598 * just skip this instruction. Currently the SEV/SEVL instructions
4599 * which are *one* of many ways to wake the CPU from WFE are not
4600 * implemented so we can't sleep like WFI does.
4602 static void gen_nop_hint(DisasContext *s, int val)
4605 /* When running in MTTCG we don't generate jumps to the yield and
4606 * WFE helpers as it won't affect the scheduling of other vCPUs.
4607 * If we wanted to more completely model WFE/SEV so we don't busy
4608 * spin unnecessarily we would need to do something more involved.
4611 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4612 gen_set_pc_im(s, s->pc);
4613 s->base.is_jmp = DISAS_YIELD;
4617 gen_set_pc_im(s, s->pc);
4618 s->base.is_jmp = DISAS_WFI;
4621 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4622 gen_set_pc_im(s, s->pc);
4623 s->base.is_jmp = DISAS_WFE;
4628 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4634 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4636 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4639 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4640 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4641 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4646 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4649 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4650 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4651 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4656 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4657 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4658 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4659 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4660 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
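/* These macros dispatch on ((size << 1) | u) to the signed or unsigned
 * 8/16/32-bit Neon helper; the default case returns 1 so the caller can
 * treat the encoding as UNDEF.
 */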
4662 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4663 switch ((size << 1) | u) { \
4665 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4668 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4671 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4674 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4677 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4680 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4682 default: return 1; \
4685 #define GEN_NEON_INTEGER_OP(name) do { \
4686 switch ((size << 1) | u) { \
4688 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4691 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4694 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4697 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4700 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4703 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4705 default: return 1; \
4708 static TCGv_i32 neon_load_scratch(int scratch)
4710 TCGv_i32 tmp = tcg_temp_new_i32();
4711 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4715 static void neon_store_scratch(int scratch, TCGv_i32 var)
4717 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4718 tcg_temp_free_i32(var);
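/* Fetch the scalar operand for by-scalar operations: for a 16-bit scalar
 * the containing register half is loaded and the selected halfword is
 * duplicated into both halves; 32-bit scalars are loaded directly.
 */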
4721 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4725 tmp = neon_load_reg(reg & 7, reg >> 4);
4727 gen_neon_dup_high16(tmp);
4729 gen_neon_dup_low16(tmp);
4732 tmp = neon_load_reg(reg & 15, reg >> 4);
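/* VUZP/VZIP are performed by out-of-line helpers working on pointers into
 * the register file; a nonzero return flags a combination (32-bit
 * doubleword elements) that is not handled here.
 */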
4737 static int gen_neon_unzip(int rd, int rm, int size, int q)
4741 if (!q && size == 2) {
4744 pd = vfp_reg_ptr(true, rd);
4745 pm = vfp_reg_ptr(true, rm);
4749 gen_helper_neon_qunzip8(pd, pm);
4752 gen_helper_neon_qunzip16(pd, pm);
4755 gen_helper_neon_qunzip32(pd, pm);
4763 gen_helper_neon_unzip8(pd, pm);
4766 gen_helper_neon_unzip16(pd, pm);
4772 tcg_temp_free_ptr(pd);
4773 tcg_temp_free_ptr(pm);
4777 static int gen_neon_zip(int rd, int rm, int size, int q)
4781 if (!q && size == 2) {
4784 pd = vfp_reg_ptr(true, rd);
4785 pm = vfp_reg_ptr(true, rm);
4789 gen_helper_neon_qzip8(pd, pm);
4792 gen_helper_neon_qzip16(pd, pm);
4795 gen_helper_neon_qzip32(pd, pm);
4803 gen_helper_neon_zip8(pd, pm);
4806 gen_helper_neon_zip16(pd, pm);
4812 tcg_temp_free_ptr(pd);
4813 tcg_temp_free_ptr(pm);
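/* One 32-bit pass of VTRN for 8-bit and 16-bit elements: alternating
 * elements are exchanged between t0 and t1 using only shifts and masks.
 */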
4817 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4821 rd = tcg_temp_new_i32();
4822 tmp = tcg_temp_new_i32();
4824 tcg_gen_shli_i32(rd, t0, 8);
4825 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4826 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4827 tcg_gen_or_i32(rd, rd, tmp);
4829 tcg_gen_shri_i32(t1, t1, 8);
4830 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4831 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4832 tcg_gen_or_i32(t1, t1, tmp);
4833 tcg_gen_mov_i32(t0, rd);
4835 tcg_temp_free_i32(tmp);
4836 tcg_temp_free_i32(rd);
4839 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4843 rd = tcg_temp_new_i32();
4844 tmp = tcg_temp_new_i32();
4846 tcg_gen_shli_i32(rd, t0, 16);
4847 tcg_gen_andi_i32(tmp, t1, 0xffff);
4848 tcg_gen_or_i32(rd, rd, tmp);
4849 tcg_gen_shri_i32(t1, t1, 16);
4850 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4851 tcg_gen_or_i32(t1, t1, tmp);
4852 tcg_gen_mov_i32(t0, rd);
4854 tcg_temp_free_i32(tmp);
4855 tcg_temp_free_i32(rd);
4863 } neon_ls_element_type[11] = {
4877 /* Translate a NEON load/store element instruction. Return nonzero if the
4878 instruction is invalid. */
4879 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4898 /* FIXME: this access check should not take precedence over UNDEF
4899 * for invalid encodings; we will generate incorrect syndrome information
4900 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4902 if (s->fp_excp_el) {
4903 gen_exception_insn(s, 4, EXCP_UDEF,
4904 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
4908 if (!s->vfp_enabled)
4910 VFP_DREG_D(rd, insn);
4911 rn = (insn >> 16) & 0xf;
4913 load = (insn & (1 << 21)) != 0;
4914 if ((insn & (1 << 23)) == 0) {
4915 /* Load store all elements. */
4916 op = (insn >> 8) & 0xf;
4917 size = (insn >> 6) & 3;
4920 /* Catch UNDEF cases for bad values of align field */
4923 if (((insn >> 5) & 1) == 1) {
4928 if (((insn >> 4) & 3) == 3) {
4935 nregs = neon_ls_element_type[op].nregs;
4936 interleave = neon_ls_element_type[op].interleave;
4937 spacing = neon_ls_element_type[op].spacing;
4938 if (size == 3 && (interleave | spacing) != 1)
4940 addr = tcg_temp_new_i32();
4941 load_reg_var(s, addr, rn);
4942 stride = (1 << size) * interleave;
4943 for (reg = 0; reg < nregs; reg++) {
4944 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4945 load_reg_var(s, addr, rn);
4946 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4947 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4948 load_reg_var(s, addr, rn);
4949 tcg_gen_addi_i32(addr, addr, 1 << size);
4952 tmp64 = tcg_temp_new_i64();
4954 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
4955 neon_store_reg64(tmp64, rd);
4957 neon_load_reg64(tmp64, rd);
4958 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
4960 tcg_temp_free_i64(tmp64);
4961 tcg_gen_addi_i32(addr, addr, stride);
4963 for (pass = 0; pass < 2; pass++) {
4966 tmp = tcg_temp_new_i32();
4967 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4968 neon_store_reg(rd, pass, tmp);
4970 tmp = neon_load_reg(rd, pass);
4971 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4972 tcg_temp_free_i32(tmp);
4974 tcg_gen_addi_i32(addr, addr, stride);
4975 } else if (size == 1) {
4977 tmp = tcg_temp_new_i32();
4978 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4979 tcg_gen_addi_i32(addr, addr, stride);
4980 tmp2 = tcg_temp_new_i32();
4981 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
4982 tcg_gen_addi_i32(addr, addr, stride);
4983 tcg_gen_shli_i32(tmp2, tmp2, 16);
4984 tcg_gen_or_i32(tmp, tmp, tmp2);
4985 tcg_temp_free_i32(tmp2);
4986 neon_store_reg(rd, pass, tmp);
4988 tmp = neon_load_reg(rd, pass);
4989 tmp2 = tcg_temp_new_i32();
4990 tcg_gen_shri_i32(tmp2, tmp, 16);
4991 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4992 tcg_temp_free_i32(tmp);
4993 tcg_gen_addi_i32(addr, addr, stride);
4994 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
4995 tcg_temp_free_i32(tmp2);
4996 tcg_gen_addi_i32(addr, addr, stride);
4998 } else /* size == 0 */ {
5001 for (n = 0; n < 4; n++) {
5002 tmp = tcg_temp_new_i32();
5003 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
5004 tcg_gen_addi_i32(addr, addr, stride);
5008 tcg_gen_shli_i32(tmp, tmp, n * 8);
5009 tcg_gen_or_i32(tmp2, tmp2, tmp);
5010 tcg_temp_free_i32(tmp);
5013 neon_store_reg(rd, pass, tmp2);
5015 tmp2 = neon_load_reg(rd, pass);
5016 for (n = 0; n < 4; n++) {
5017 tmp = tcg_temp_new_i32();
5019 tcg_gen_mov_i32(tmp, tmp2);
5021 tcg_gen_shri_i32(tmp, tmp2, n * 8);
5023 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
5024 tcg_temp_free_i32(tmp);
5025 tcg_gen_addi_i32(addr, addr, stride);
5027 tcg_temp_free_i32(tmp2);
5034 tcg_temp_free_i32(addr);
5037 size = (insn >> 10) & 3;
5039 /* Load single element to all lanes. */
5040 int a = (insn >> 4) & 1;
5044 size = (insn >> 6) & 3;
5045 nregs = ((insn >> 8) & 3) + 1;
5048 if (nregs != 4 || a == 0) {
5051 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5054 if (nregs == 1 && a == 1 && size == 0) {
5057 if (nregs == 3 && a == 1) {
5060 addr = tcg_temp_new_i32();
5061 load_reg_var(s, addr, rn);
5063 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5064 tmp = gen_load_and_replicate(s, addr, size);
5065 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5066 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5067 if (insn & (1 << 5)) {
5068 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5069 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5071 tcg_temp_free_i32(tmp);
5073 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5074 stride = (insn & (1 << 5)) ? 2 : 1;
5075 for (reg = 0; reg < nregs; reg++) {
5076 tmp = gen_load_and_replicate(s, addr, size);
5077 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5078 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5079 tcg_temp_free_i32(tmp);
5080 tcg_gen_addi_i32(addr, addr, 1 << size);
5084 tcg_temp_free_i32(addr);
5085 stride = (1 << size) * nregs;
5087 /* Single element. */
5088 int idx = (insn >> 4) & 0xf;
5089 pass = (insn >> 7) & 1;
5092 shift = ((insn >> 5) & 3) * 8;
5096 shift = ((insn >> 6) & 1) * 16;
5097 stride = (insn & (1 << 5)) ? 2 : 1;
5101 stride = (insn & (1 << 6)) ? 2 : 1;
5106 nregs = ((insn >> 8) & 3) + 1;
5107 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5110 if (((idx & (1 << size)) != 0) ||
5111 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5116 if ((idx & 1) != 0) {
5121 if (size == 2 && (idx & 2) != 0) {
5126 if ((size == 2) && ((idx & 3) == 3)) {
5133 if ((rd + stride * (nregs - 1)) > 31) {
5134 /* Attempts to write off the end of the register file
5135 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5136 * the neon_load_reg()/neon_store_reg() calls would run off the end of the array.
5140 addr = tcg_temp_new_i32();
5141 load_reg_var(s, addr, rn);
5142 for (reg = 0; reg < nregs; reg++) {
5144 tmp = tcg_temp_new_i32();
5147 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
5150 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
5153 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
5155 default: /* Avoid compiler warnings. */
5159 tmp2 = neon_load_reg(rd, pass);
5160 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5161 shift, size ? 16 : 8);
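/* Editor's example (assumes a byte access, size == 0): a load into lane 1
 * of the 32-bit pass has shift == 8, so the deposit above replaces bits
 * [15:8] of the existing register word with the loaded byte and leaves the
 * other lanes untouched.
 */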
5162 tcg_temp_free_i32(tmp2);
5164 neon_store_reg(rd, pass, tmp);
5165 } else { /* Store */
5166 tmp = neon_load_reg(rd, pass);
5168 tcg_gen_shri_i32(tmp, tmp, shift);
5171 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
5174 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5177 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5180 tcg_temp_free_i32(tmp);
5183 tcg_gen_addi_i32(addr, addr, 1 << size);
5185 tcg_temp_free_i32(addr);
5186 stride = nregs * (1 << size);
5192 base = load_reg(s, rn);
5194 tcg_gen_addi_i32(base, base, stride);
5197 index = load_reg(s, rm);
5198 tcg_gen_add_i32(base, base, index);
5199 tcg_temp_free_i32(index);
5201 store_reg(s, rn, base);
5206 /* Bitwise select. dest = c ? t : f. Clobbers t and f. */
5207 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
5209 tcg_gen_and_i32(t, t, c);
5210 tcg_gen_andc_i32(f, f, c);
5211 tcg_gen_or_i32(dest, t, f);
5214 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
5217 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5218 case 1: gen_helper_neon_narrow_u16(dest, src); break;
5219 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
5224 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5227 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5228 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5229 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
5234 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5237 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5238 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5239 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5244 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5247 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5248 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5249 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5254 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5260 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5261 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5266 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5267 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5274 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5275 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5280 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5281 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5288 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5292 case 0: gen_helper_neon_widen_u8(dest, src); break;
5293 case 1: gen_helper_neon_widen_u16(dest, src); break;
5294 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5299 case 0: gen_helper_neon_widen_s8(dest, src); break;
5300 case 1: gen_helper_neon_widen_s16(dest, src); break;
5301 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5305 tcg_temp_free_i32(src);
5308 static inline void gen_neon_addl(int size)
5311 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5312 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5313 case 2: tcg_gen_add_i64(CPU_V001); break;
5318 static inline void gen_neon_subl(int size)
5321 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5322 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5323 case 2: tcg_gen_sub_i64(CPU_V001); break;
5328 static inline void gen_neon_negl(TCGv_i64 var, int size)
5331 case 0: gen_helper_neon_negl_u16(var, var); break;
5332 case 1: gen_helper_neon_negl_u32(var, var); break;
5334 tcg_gen_neg_i64(var, var);
5340 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5343 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5344 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5349 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5354 switch ((size << 1) | u) {
5355 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5356 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5357 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5358 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5360 tmp = gen_muls_i64_i32(a, b);
5361 tcg_gen_mov_i64(dest, tmp);
5362 tcg_temp_free_i64(tmp);
5365 tmp = gen_mulu_i64_i32(a, b);
5366 tcg_gen_mov_i64(dest, tmp);
5367 tcg_temp_free_i64(tmp);
5372 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5373 Don't forget to free them now. */
5375 tcg_temp_free_i32(a);
5376 tcg_temp_free_i32(b);
5380 static void gen_neon_narrow_op(int op, int u, int size,
5381 TCGv_i32 dest, TCGv_i64 src)
5385 gen_neon_unarrow_sats(size, dest, src);
5387 gen_neon_narrow(size, dest, src);
5391 gen_neon_narrow_satu(size, dest, src);
5393 gen_neon_narrow_sats(size, dest, src);
5398 /* Symbolic constants for op fields for Neon 3-register same-length.
5399 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5402 #define NEON_3R_VHADD 0
5403 #define NEON_3R_VQADD 1
5404 #define NEON_3R_VRHADD 2
5405 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5406 #define NEON_3R_VHSUB 4
5407 #define NEON_3R_VQSUB 5
5408 #define NEON_3R_VCGT 6
5409 #define NEON_3R_VCGE 7
5410 #define NEON_3R_VSHL 8
5411 #define NEON_3R_VQSHL 9
5412 #define NEON_3R_VRSHL 10
5413 #define NEON_3R_VQRSHL 11
5414 #define NEON_3R_VMAX 12
5415 #define NEON_3R_VMIN 13
5416 #define NEON_3R_VABD 14
5417 #define NEON_3R_VABA 15
5418 #define NEON_3R_VADD_VSUB 16
5419 #define NEON_3R_VTST_VCEQ 17
5420 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5421 #define NEON_3R_VMUL 19
5422 #define NEON_3R_VPMAX 20
5423 #define NEON_3R_VPMIN 21
5424 #define NEON_3R_VQDMULH_VQRDMULH 22
5425 #define NEON_3R_VPADD_VQRDMLAH 23
5426 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
5427 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
5428 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5429 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5430 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5431 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5432 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
5433 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
5435 static const uint8_t neon_3r_sizes[] = {
5436 [NEON_3R_VHADD] = 0x7,
5437 [NEON_3R_VQADD] = 0xf,
5438 [NEON_3R_VRHADD] = 0x7,
5439 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5440 [NEON_3R_VHSUB] = 0x7,
5441 [NEON_3R_VQSUB] = 0xf,
5442 [NEON_3R_VCGT] = 0x7,
5443 [NEON_3R_VCGE] = 0x7,
5444 [NEON_3R_VSHL] = 0xf,
5445 [NEON_3R_VQSHL] = 0xf,
5446 [NEON_3R_VRSHL] = 0xf,
5447 [NEON_3R_VQRSHL] = 0xf,
5448 [NEON_3R_VMAX] = 0x7,
5449 [NEON_3R_VMIN] = 0x7,
5450 [NEON_3R_VABD] = 0x7,
5451 [NEON_3R_VABA] = 0x7,
5452 [NEON_3R_VADD_VSUB] = 0xf,
5453 [NEON_3R_VTST_VCEQ] = 0x7,
5454 [NEON_3R_VML] = 0x7,
5455 [NEON_3R_VMUL] = 0x7,
5456 [NEON_3R_VPMAX] = 0x7,
5457 [NEON_3R_VPMIN] = 0x7,
5458 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5459 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
5460 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
5461 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
5462 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5463 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5464 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5465 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5466 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
5467 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
5470 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5471 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5474 #define NEON_2RM_VREV64 0
5475 #define NEON_2RM_VREV32 1
5476 #define NEON_2RM_VREV16 2
5477 #define NEON_2RM_VPADDL 4
5478 #define NEON_2RM_VPADDL_U 5
5479 #define NEON_2RM_AESE 6 /* Includes AESD */
5480 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5481 #define NEON_2RM_VCLS 8
5482 #define NEON_2RM_VCLZ 9
5483 #define NEON_2RM_VCNT 10
5484 #define NEON_2RM_VMVN 11
5485 #define NEON_2RM_VPADAL 12
5486 #define NEON_2RM_VPADAL_U 13
5487 #define NEON_2RM_VQABS 14
5488 #define NEON_2RM_VQNEG 15
5489 #define NEON_2RM_VCGT0 16
5490 #define NEON_2RM_VCGE0 17
5491 #define NEON_2RM_VCEQ0 18
5492 #define NEON_2RM_VCLE0 19
5493 #define NEON_2RM_VCLT0 20
5494 #define NEON_2RM_SHA1H 21
5495 #define NEON_2RM_VABS 22
5496 #define NEON_2RM_VNEG 23
5497 #define NEON_2RM_VCGT0_F 24
5498 #define NEON_2RM_VCGE0_F 25
5499 #define NEON_2RM_VCEQ0_F 26
5500 #define NEON_2RM_VCLE0_F 27
5501 #define NEON_2RM_VCLT0_F 28
5502 #define NEON_2RM_VABS_F 30
5503 #define NEON_2RM_VNEG_F 31
5504 #define NEON_2RM_VSWP 32
5505 #define NEON_2RM_VTRN 33
5506 #define NEON_2RM_VUZP 34
5507 #define NEON_2RM_VZIP 35
5508 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5509 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5510 #define NEON_2RM_VSHLL 38
5511 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5512 #define NEON_2RM_VRINTN 40
5513 #define NEON_2RM_VRINTX 41
5514 #define NEON_2RM_VRINTA 42
5515 #define NEON_2RM_VRINTZ 43
5516 #define NEON_2RM_VCVT_F16_F32 44
5517 #define NEON_2RM_VRINTM 45
5518 #define NEON_2RM_VCVT_F32_F16 46
5519 #define NEON_2RM_VRINTP 47
5520 #define NEON_2RM_VCVTAU 48
5521 #define NEON_2RM_VCVTAS 49
5522 #define NEON_2RM_VCVTNU 50
5523 #define NEON_2RM_VCVTNS 51
5524 #define NEON_2RM_VCVTPU 52
5525 #define NEON_2RM_VCVTPS 53
5526 #define NEON_2RM_VCVTMU 54
5527 #define NEON_2RM_VCVTMS 55
5528 #define NEON_2RM_VRECPE 56
5529 #define NEON_2RM_VRSQRTE 57
5530 #define NEON_2RM_VRECPE_F 58
5531 #define NEON_2RM_VRSQRTE_F 59
5532 #define NEON_2RM_VCVT_FS 60
5533 #define NEON_2RM_VCVT_FU 61
5534 #define NEON_2RM_VCVT_SF 62
5535 #define NEON_2RM_VCVT_UF 63
5537 static int neon_2rm_is_float_op(int op)
5539 /* Return true if this neon 2reg-misc op is float-to-float */
5540 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5541 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5542 op == NEON_2RM_VRINTM ||
5543 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5544 op >= NEON_2RM_VRECPE_F);
5547 static bool neon_2rm_is_v8_op(int op)
5549 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5551 case NEON_2RM_VRINTN:
5552 case NEON_2RM_VRINTA:
5553 case NEON_2RM_VRINTM:
5554 case NEON_2RM_VRINTP:
5555 case NEON_2RM_VRINTZ:
5556 case NEON_2RM_VRINTX:
5557 case NEON_2RM_VCVTAU:
5558 case NEON_2RM_VCVTAS:
5559 case NEON_2RM_VCVTNU:
5560 case NEON_2RM_VCVTNS:
5561 case NEON_2RM_VCVTPU:
5562 case NEON_2RM_VCVTPS:
5563 case NEON_2RM_VCVTMU:
5564 case NEON_2RM_VCVTMS:
5571 /* Each entry in this array has bit n set if the insn allows
5572 * size value n (otherwise it will UNDEF). Since unallocated
5573 * op values will have no bits set they always UNDEF.
5575 static const uint8_t neon_2rm_sizes[] = {
5576 [NEON_2RM_VREV64] = 0x7,
5577 [NEON_2RM_VREV32] = 0x3,
5578 [NEON_2RM_VREV16] = 0x1,
5579 [NEON_2RM_VPADDL] = 0x7,
5580 [NEON_2RM_VPADDL_U] = 0x7,
5581 [NEON_2RM_AESE] = 0x1,
5582 [NEON_2RM_AESMC] = 0x1,
5583 [NEON_2RM_VCLS] = 0x7,
5584 [NEON_2RM_VCLZ] = 0x7,
5585 [NEON_2RM_VCNT] = 0x1,
5586 [NEON_2RM_VMVN] = 0x1,
5587 [NEON_2RM_VPADAL] = 0x7,
5588 [NEON_2RM_VPADAL_U] = 0x7,
5589 [NEON_2RM_VQABS] = 0x7,
5590 [NEON_2RM_VQNEG] = 0x7,
5591 [NEON_2RM_VCGT0] = 0x7,
5592 [NEON_2RM_VCGE0] = 0x7,
5593 [NEON_2RM_VCEQ0] = 0x7,
5594 [NEON_2RM_VCLE0] = 0x7,
5595 [NEON_2RM_VCLT0] = 0x7,
5596 [NEON_2RM_SHA1H] = 0x4,
5597 [NEON_2RM_VABS] = 0x7,
5598 [NEON_2RM_VNEG] = 0x7,
5599 [NEON_2RM_VCGT0_F] = 0x4,
5600 [NEON_2RM_VCGE0_F] = 0x4,
5601 [NEON_2RM_VCEQ0_F] = 0x4,
5602 [NEON_2RM_VCLE0_F] = 0x4,
5603 [NEON_2RM_VCLT0_F] = 0x4,
5604 [NEON_2RM_VABS_F] = 0x4,
5605 [NEON_2RM_VNEG_F] = 0x4,
5606 [NEON_2RM_VSWP] = 0x1,
5607 [NEON_2RM_VTRN] = 0x7,
5608 [NEON_2RM_VUZP] = 0x7,
5609 [NEON_2RM_VZIP] = 0x7,
5610 [NEON_2RM_VMOVN] = 0x7,
5611 [NEON_2RM_VQMOVN] = 0x7,
5612 [NEON_2RM_VSHLL] = 0x7,
5613 [NEON_2RM_SHA1SU1] = 0x4,
5614 [NEON_2RM_VRINTN] = 0x4,
5615 [NEON_2RM_VRINTX] = 0x4,
5616 [NEON_2RM_VRINTA] = 0x4,
5617 [NEON_2RM_VRINTZ] = 0x4,
5618 [NEON_2RM_VCVT_F16_F32] = 0x2,
5619 [NEON_2RM_VRINTM] = 0x4,
5620 [NEON_2RM_VCVT_F32_F16] = 0x2,
5621 [NEON_2RM_VRINTP] = 0x4,
5622 [NEON_2RM_VCVTAU] = 0x4,
5623 [NEON_2RM_VCVTAS] = 0x4,
5624 [NEON_2RM_VCVTNU] = 0x4,
5625 [NEON_2RM_VCVTNS] = 0x4,
5626 [NEON_2RM_VCVTPU] = 0x4,
5627 [NEON_2RM_VCVTPS] = 0x4,
5628 [NEON_2RM_VCVTMU] = 0x4,
5629 [NEON_2RM_VCVTMS] = 0x4,
5630 [NEON_2RM_VRECPE] = 0x4,
5631 [NEON_2RM_VRSQRTE] = 0x4,
5632 [NEON_2RM_VRECPE_F] = 0x4,
5633 [NEON_2RM_VRSQRTE_F] = 0x4,
5634 [NEON_2RM_VCVT_FS] = 0x4,
5635 [NEON_2RM_VCVT_FU] = 0x4,
5636 [NEON_2RM_VCVT_SF] = 0x4,
5637 [NEON_2RM_VCVT_UF] = 0x4,
5641 /* Expand a v8.1 SIMD helper. */
5642 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5643 int q, int rd, int rn, int rm)
5645 if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
5646 int opr_sz = (1 + q) * 8;
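/* Editor's note: opr_sz is the operation size in bytes -- 8 for a single
 * D register (q == 0) and 16 for a Q register pair (q == 1).
 */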
5647 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5648 vfp_reg_offset(1, rn),
5649 vfp_reg_offset(1, rm), cpu_env,
5650 opr_sz, opr_sz, 0, fn);
5656 /* Translate a NEON data processing instruction. Return nonzero if the
5657 instruction is invalid.
5658 We process data in a mixture of 32-bit and 64-bit chunks.
5659 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5661 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5673 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5674 TCGv_ptr ptr1, ptr2, ptr3;
5677 /* FIXME: this access check should not take precedence over UNDEF
5678 * for invalid encodings; we will generate incorrect syndrome information
5679 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5681 if (s->fp_excp_el) {
5682 gen_exception_insn(s, 4, EXCP_UDEF,
5683 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
5687 if (!s->vfp_enabled)
5689 q = (insn & (1 << 6)) != 0;
5690 u = (insn >> 24) & 1;
5691 VFP_DREG_D(rd, insn);
5692 VFP_DREG_N(rn, insn);
5693 VFP_DREG_M(rm, insn);
5694 size = (insn >> 20) & 3;
5695 if ((insn & (1 << 23)) == 0) {
5696 /* Three register same length. */
5697 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
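/* Editor's example: a VADD encoding has bits [11:8] == 0b1000 and bit [4] == 0,
 * so op == 0b10000 == 16 == NEON_3R_VADD_VSUB; the U bit (bit 24, held in 'u')
 * then selects VADD (U == 0) versus VSUB (U == 1) below.
 */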
5698 /* Catch invalid op and bad size combinations: UNDEF */
5699 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5702 /* All insns of this form UNDEF for either this condition or the
5703 * superset of cases "Q==1"; we catch the latter later.
5705 if (q && ((rd | rn | rm) & 1)) {
5710 /* The SHA-1/SHA-256 3-register instructions require special
5711 * treatment here, as their size field is overloaded as an
5712 * op type selector, and they all consume their input in a
5718 if (!u) { /* SHA-1 */
5719 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5722 ptr1 = vfp_reg_ptr(true, rd);
5723 ptr2 = vfp_reg_ptr(true, rn);
5724 ptr3 = vfp_reg_ptr(true, rm);
5725 tmp4 = tcg_const_i32(size);
5726 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
5727 tcg_temp_free_i32(tmp4);
5728 } else { /* SHA-256 */
5729 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5732 ptr1 = vfp_reg_ptr(true, rd);
5733 ptr2 = vfp_reg_ptr(true, rn);
5734 ptr3 = vfp_reg_ptr(true, rm);
5737 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
5740 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
5743 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
5747 tcg_temp_free_ptr(ptr1);
5748 tcg_temp_free_ptr(ptr2);
5749 tcg_temp_free_ptr(ptr3);
5752 case NEON_3R_VPADD_VQRDMLAH:
5759 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
5762 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
5767 case NEON_3R_VFM_VQRDMLSH:
5778 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
5781 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
5786 if (size == 3 && op != NEON_3R_LOGIC) {
5787 /* 64-bit element instructions. */
5788 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5789 neon_load_reg64(cpu_V0, rn + pass);
5790 neon_load_reg64(cpu_V1, rm + pass);
5794 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5797 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5803 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5806 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5812 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5814 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5819 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5822 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5828 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5830 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5833 case NEON_3R_VQRSHL:
5835 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5838 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5842 case NEON_3R_VADD_VSUB:
5844 tcg_gen_sub_i64(CPU_V001);
5846 tcg_gen_add_i64(CPU_V001);
5852 neon_store_reg64(cpu_V0, rd + pass);
5861 case NEON_3R_VQRSHL:
5864 /* Shift instruction operands are reversed. */
5870 case NEON_3R_VPADD_VQRDMLAH:
5875 case NEON_3R_FLOAT_ARITH:
5876 pairwise = (u && size < 2); /* if VPADD (float) */
5878 case NEON_3R_FLOAT_MINMAX:
5879 pairwise = u; /* if VPMIN/VPMAX (float) */
5881 case NEON_3R_FLOAT_CMP:
5883 /* no encoding for U=0 C=1x */
5887 case NEON_3R_FLOAT_ACMP:
5892 case NEON_3R_FLOAT_MISC:
5893 /* VMAXNM/VMINNM in ARMv8 */
5894 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5899 if (u && (size != 0)) {
5900 /* UNDEF on invalid size for polynomial subcase */
5904 case NEON_3R_VFM_VQRDMLSH:
5905 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
5913 if (pairwise && q) {
5914 /* All the pairwise insns UNDEF if Q is set */
5918 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5923 tmp = neon_load_reg(rn, 0);
5924 tmp2 = neon_load_reg(rn, 1);
5926 tmp = neon_load_reg(rm, 0);
5927 tmp2 = neon_load_reg(rm, 1);
5931 tmp = neon_load_reg(rn, pass);
5932 tmp2 = neon_load_reg(rm, pass);
5936 GEN_NEON_INTEGER_OP(hadd);
5939 GEN_NEON_INTEGER_OP_ENV(qadd);
5941 case NEON_3R_VRHADD:
5942 GEN_NEON_INTEGER_OP(rhadd);
5944 case NEON_3R_LOGIC: /* Logic ops. */
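/* Editor's note: the (u << 2) | size selector below maps 0..7 to
 * VAND, VBIC, VORR (VMOV when Vn == Vm), VORN, VEOR, VBSL, VBIT, VBIF,
 * the same set listed in the NEON_3R_LOGIC comment above.
 */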
5945 switch ((u << 2) | size) {
5947 tcg_gen_and_i32(tmp, tmp, tmp2);
5950 tcg_gen_andc_i32(tmp, tmp, tmp2);
5953 tcg_gen_or_i32(tmp, tmp, tmp2);
5956 tcg_gen_orc_i32(tmp, tmp, tmp2);
5959 tcg_gen_xor_i32(tmp, tmp, tmp2);
5962 tmp3 = neon_load_reg(rd, pass);
5963 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5964 tcg_temp_free_i32(tmp3);
5967 tmp3 = neon_load_reg(rd, pass);
5968 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5969 tcg_temp_free_i32(tmp3);
5972 tmp3 = neon_load_reg(rd, pass);
5973 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5974 tcg_temp_free_i32(tmp3);
5979 GEN_NEON_INTEGER_OP(hsub);
5982 GEN_NEON_INTEGER_OP_ENV(qsub);
5985 GEN_NEON_INTEGER_OP(cgt);
5988 GEN_NEON_INTEGER_OP(cge);
5991 GEN_NEON_INTEGER_OP(shl);
5994 GEN_NEON_INTEGER_OP_ENV(qshl);
5997 GEN_NEON_INTEGER_OP(rshl);
5999 case NEON_3R_VQRSHL:
6000 GEN_NEON_INTEGER_OP_ENV(qrshl);
6003 GEN_NEON_INTEGER_OP(max);
6006 GEN_NEON_INTEGER_OP(min);
6009 GEN_NEON_INTEGER_OP(abd);
6012 GEN_NEON_INTEGER_OP(abd);
6013 tcg_temp_free_i32(tmp2);
6014 tmp2 = neon_load_reg(rd, pass);
6015 gen_neon_add(size, tmp, tmp2);
6017 case NEON_3R_VADD_VSUB:
6018 if (!u) { /* VADD */
6019 gen_neon_add(size, tmp, tmp2);
6022 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
6023 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
6024 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
6029 case NEON_3R_VTST_VCEQ:
6030 if (!u) { /* VTST */
6032 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
6033 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
6034 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
6039 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6040 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6041 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6046 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
6048 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6049 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6050 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6053 tcg_temp_free_i32(tmp2);
6054 tmp2 = neon_load_reg(rd, pass);
6056 gen_neon_rsb(size, tmp, tmp2);
6058 gen_neon_add(size, tmp, tmp2);
6062 if (u) { /* polynomial */
6063 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
6064 } else { /* Integer */
6066 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6067 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6068 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6074 GEN_NEON_INTEGER_OP(pmax);
6077 GEN_NEON_INTEGER_OP(pmin);
6079 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
6080 if (!u) { /* VQDMULH */
6083 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6086 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6090 } else { /* VQRDMULH */
6093 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6096 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6102 case NEON_3R_VPADD_VQRDMLAH:
6104 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6105 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6106 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
6110 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
6112 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6113 switch ((u << 2) | size) {
6116 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6119 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
6122 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
6127 tcg_temp_free_ptr(fpstatus);
6130 case NEON_3R_FLOAT_MULTIPLY:
6132 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6133 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6135 tcg_temp_free_i32(tmp2);
6136 tmp2 = neon_load_reg(rd, pass);
6138 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6140 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6143 tcg_temp_free_ptr(fpstatus);
6146 case NEON_3R_FLOAT_CMP:
6148 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6150 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6153 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6155 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6158 tcg_temp_free_ptr(fpstatus);
6161 case NEON_3R_FLOAT_ACMP:
6163 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6165 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6167 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6169 tcg_temp_free_ptr(fpstatus);
6172 case NEON_3R_FLOAT_MINMAX:
6174 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6176 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
6178 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
6180 tcg_temp_free_ptr(fpstatus);
6183 case NEON_3R_FLOAT_MISC:
6186 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6188 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
6190 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
6192 tcg_temp_free_ptr(fpstatus);
6195 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6197 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6201 case NEON_3R_VFM_VQRDMLSH:
6203 /* VFMA, VFMS: fused multiply-add */
6204 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6205 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6208 gen_helper_vfp_negs(tmp, tmp);
6210 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6211 tcg_temp_free_i32(tmp3);
6212 tcg_temp_free_ptr(fpstatus);
6218 tcg_temp_free_i32(tmp2);
6220 /* Save the result. For elementwise operations we can put it
6221 straight into the destination register. For pairwise operations
6222 we have to be careful to avoid clobbering the source operands. */
6223 if (pairwise && rd == rm) {
6224 neon_store_scratch(pass, tmp);
6226 neon_store_reg(rd, pass, tmp);
6230 if (pairwise && rd == rm) {
6231 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6232 tmp = neon_load_scratch(pass);
6233 neon_store_reg(rd, pass, tmp);
6236 /* End of 3 register same size operations. */
6237 } else if (insn & (1 << 4)) {
6238 if ((insn & 0x00380080) != 0) {
6239 /* Two registers and shift. */
6240 op = (insn >> 8) & 0xf;
6241 if (insn & (1 << 7)) {
6249 while ((insn & (1 << (size + 19))) == 0)
6252 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
6253 /* To avoid excessive duplication of ops we implement shift
6254 by immediate using the variable shift operations. */
6256 /* Shift by immediate:
6257 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
6258 if (q && ((rd | rm) & 1)) {
6261 if (!u && (op == 4 || op == 6)) {
6264 /* Right shifts are encoded as N - shift, where N is the
6265 element size in bits. */
6267 shift = shift - (1 << (size + 3));
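/* Editor's worked example (not in the original source): a VSHR #3 on 32-bit
 * elements is encoded with imm6 == 64 - 3 == 61, so the field decoded above
 * is 61 & 0x1f == 29 and this adjustment gives 29 - 32 == -3, i.e. a right
 * shift by 3 once it is handed to the variable-shift helpers.
 */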
6275 imm = (uint8_t) shift;
6280 imm = (uint16_t) shift;
6291 for (pass = 0; pass < count; pass++) {
6293 neon_load_reg64(cpu_V0, rm + pass);
6294 tcg_gen_movi_i64(cpu_V1, imm);
6299 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6301 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
6306 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6308 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6311 case 5: /* VSHL, VSLI */
6312 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6314 case 6: /* VQSHLU */
6315 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6320 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6323 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6328 if (op == 1 || op == 3) {
6330 neon_load_reg64(cpu_V1, rd + pass);
6331 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6332 } else if (op == 4 || (op == 5 && u)) {
6334 neon_load_reg64(cpu_V1, rd + pass);
6336 if (shift < -63 || shift > 63) {
6340 mask = 0xffffffffffffffffull >> -shift;
6342 mask = 0xffffffffffffffffull << shift;
6345 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6346 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6348 neon_store_reg64(cpu_V0, rd + pass);
6349 } else { /* size < 3 */
6350 /* Operands in T0 and T1. */
6351 tmp = neon_load_reg(rm, pass);
6352 tmp2 = tcg_temp_new_i32();
6353 tcg_gen_movi_i32(tmp2, imm);
6357 GEN_NEON_INTEGER_OP(shl);
6361 GEN_NEON_INTEGER_OP(rshl);
6364 case 5: /* VSHL, VSLI */
6366 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6367 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6368 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
6372 case 6: /* VQSHLU */
6375 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6379 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6383 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6391 GEN_NEON_INTEGER_OP_ENV(qshl);
6394 tcg_temp_free_i32(tmp2);
6396 if (op == 1 || op == 3) {
6398 tmp2 = neon_load_reg(rd, pass);
6399 gen_neon_add(size, tmp, tmp2);
6400 tcg_temp_free_i32(tmp2);
6401 } else if (op == 4 || (op == 5 && u)) {
6406 mask = 0xff >> -shift;
6408 mask = (uint8_t)(0xff << shift);
6414 mask = 0xffff >> -shift;
6416 mask = (uint16_t)(0xffff << shift);
6420 if (shift < -31 || shift > 31) {
6424 mask = 0xffffffffu >> -shift;
6426 mask = 0xffffffffu << shift;
6432 tmp2 = neon_load_reg(rd, pass);
6433 tcg_gen_andi_i32(tmp, tmp, mask);
6434 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
6435 tcg_gen_or_i32(tmp, tmp, tmp2);
6436 tcg_temp_free_i32(tmp2);
6438 neon_store_reg(rd, pass, tmp);
6441 } else if (op < 10) {
6442 /* Shift by immediate and narrow:
6443 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6444 int input_unsigned = (op == 8) ? !u : u;
6448 shift = shift - (1 << (size + 3));
6451 tmp64 = tcg_const_i64(shift);
6452 neon_load_reg64(cpu_V0, rm);
6453 neon_load_reg64(cpu_V1, rm + 1);
6454 for (pass = 0; pass < 2; pass++) {
6462 if (input_unsigned) {
6463 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6465 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6468 if (input_unsigned) {
6469 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6471 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6474 tmp = tcg_temp_new_i32();
6475 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6476 neon_store_reg(rd, pass, tmp);
6478 tcg_temp_free_i64(tmp64);
6481 imm = (uint16_t)shift;
6485 imm = (uint32_t)shift;
6487 tmp2 = tcg_const_i32(imm);
6488 tmp4 = neon_load_reg(rm + 1, 0);
6489 tmp5 = neon_load_reg(rm + 1, 1);
6490 for (pass = 0; pass < 2; pass++) {
6492 tmp = neon_load_reg(rm, 0);
6496 gen_neon_shift_narrow(size, tmp, tmp2, q,
6499 tmp3 = neon_load_reg(rm, 1);
6503 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6505 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6506 tcg_temp_free_i32(tmp);
6507 tcg_temp_free_i32(tmp3);
6508 tmp = tcg_temp_new_i32();
6509 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6510 neon_store_reg(rd, pass, tmp);
6512 tcg_temp_free_i32(tmp2);
6514 } else if (op == 10) {
6516 if (q || (rd & 1)) {
6519 tmp = neon_load_reg(rm, 0);
6520 tmp2 = neon_load_reg(rm, 1);
6521 for (pass = 0; pass < 2; pass++) {
6525 gen_neon_widen(cpu_V0, tmp, size, u);
6528 /* The shift is less than the width of the source
6529 type, so we can just shift the whole register. */
6530 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
6531 /* Widen the result of shift: we need to clear
6532 * the potential overflow bits resulting from
6533 * left bits of the narrow input appearing as
6534 * right bits of the left-neighbouring narrow
6536 if (size < 2 || !u) {
6539 imm = (0xffu >> (8 - shift));
6541 } else if (size == 1) {
6542 imm = 0xffff >> (16 - shift);
6545 imm = 0xffffffff >> (32 - shift);
6548 imm64 = imm | (((uint64_t)imm) << 32);
6552 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
6555 neon_store_reg64(cpu_V0, rd + pass);
6557 } else if (op >= 14) {
6558 /* VCVT fixed-point. */
6559 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6562 /* We have already masked out the must-be-1 top bit of imm6,
6563 * hence this 32-shift where the ARM ARM has 64-imm6.
6566 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6567 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
6570 gen_vfp_ulto(0, shift, 1);
6572 gen_vfp_slto(0, shift, 1);
6575 gen_vfp_toul(0, shift, 1);
6577 gen_vfp_tosl(0, shift, 1);
6579 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6584 } else { /* (insn & 0x00380080) == 0 */
6586 if (q && (rd & 1)) {
6590 op = (insn >> 8) & 0xf;
6591 /* One register and immediate. */
6592 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
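/* Editor's note: this assembles the 8-bit 'abcdefgh' modified-immediate
 * field from a (insn bit 24, held in 'u'), bcd (bits [18:16]) and
 * efgh (bits [3:0]).
 */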
6593 invert = (insn & (1 << 5)) != 0;
6594 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6595 * We choose to not special-case this and will behave as if a
6596 * valid constant encoding of 0 had been given.
6615 imm = (imm << 8) | (imm << 24);
6618 imm = (imm << 8) | 0xff;
6621 imm = (imm << 16) | 0xffff;
6624 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6632 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6633 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
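/* Editor's worked example: imm8 == 0x70 (abcdefgh == 0111 0000) expands to
 * 0x3f800000 here, i.e. the single-precision value 1.0, matching the
 * VFP/Neon modified-immediate floating-point encoding.
 */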
6639 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6640 if (op & 1 && op < 12) {
6641 tmp = neon_load_reg(rd, pass);
6643 /* The immediate value has already been inverted, so
6645 tcg_gen_andi_i32(tmp, tmp, imm);
6647 tcg_gen_ori_i32(tmp, tmp, imm);
6651 tmp = tcg_temp_new_i32();
6652 if (op == 14 && invert) {
6656 for (n = 0; n < 4; n++) {
6657 if (imm & (1 << (n + (pass & 1) * 4)))
6658 val |= 0xff << (n * 8);
6660 tcg_gen_movi_i32(tmp, val);
6662 tcg_gen_movi_i32(tmp, imm);
6665 neon_store_reg(rd, pass, tmp);
6668 } else { /* (insn & 0x00800010 == 0x00800000) */
6670 op = (insn >> 8) & 0xf;
6671 if ((insn & (1 << 6)) == 0) {
6672 /* Three registers of different lengths. */
6676 /* undefreq: bit 0 : UNDEF if size == 0
6677 * bit 1 : UNDEF if size == 1
6678 * bit 2 : UNDEF if size == 2
6679 * bit 3 : UNDEF if U == 1
6680 * Note that [2:0] set implies 'always UNDEF'
6683 /* prewiden, src1_wide, src2_wide, undefreq */
6684 static const int neon_3reg_wide[16][4] = {
6685 {1, 0, 0, 0}, /* VADDL */
6686 {1, 1, 0, 0}, /* VADDW */
6687 {1, 0, 0, 0}, /* VSUBL */
6688 {1, 1, 0, 0}, /* VSUBW */
6689 {0, 1, 1, 0}, /* VADDHN */
6690 {0, 0, 0, 0}, /* VABAL */
6691 {0, 1, 1, 0}, /* VSUBHN */
6692 {0, 0, 0, 0}, /* VABDL */
6693 {0, 0, 0, 0}, /* VMLAL */
6694 {0, 0, 0, 9}, /* VQDMLAL */
6695 {0, 0, 0, 0}, /* VMLSL */
6696 {0, 0, 0, 9}, /* VQDMLSL */
6697 {0, 0, 0, 0}, /* Integer VMULL */
6698 {0, 0, 0, 1}, /* VQDMULL */
6699 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6700 {0, 0, 0, 7}, /* Reserved: always UNDEF */
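/* Editor's example: the VQDMLAL row above, {0, 0, 0, 9}, sets undefreq
 * bits 0 and 3, i.e. the instruction UNDEFs when size == 0 or when U == 1.
 */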
6703 prewiden = neon_3reg_wide[op][0];
6704 src1_wide = neon_3reg_wide[op][1];
6705 src2_wide = neon_3reg_wide[op][2];
6706 undefreq = neon_3reg_wide[op][3];
6708 if ((undefreq & (1 << size)) ||
6709 ((undefreq & 8) && u)) {
6712 if ((src1_wide && (rn & 1)) ||
6713 (src2_wide && (rm & 1)) ||
6714 (!src2_wide && (rd & 1))) {
6718 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6719 * outside the loop below as it only performs a single pass.
6721 if (op == 14 && size == 2) {
6722 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6724 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6727 tcg_rn = tcg_temp_new_i64();
6728 tcg_rm = tcg_temp_new_i64();
6729 tcg_rd = tcg_temp_new_i64();
6730 neon_load_reg64(tcg_rn, rn);
6731 neon_load_reg64(tcg_rm, rm);
6732 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6733 neon_store_reg64(tcg_rd, rd);
6734 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6735 neon_store_reg64(tcg_rd, rd + 1);
6736 tcg_temp_free_i64(tcg_rn);
6737 tcg_temp_free_i64(tcg_rm);
6738 tcg_temp_free_i64(tcg_rd);
6742 /* Avoid overlapping operands. Wide source operands are
6743 always aligned so will never overlap with wide
6744 destinations in problematic ways. */
6745 if (rd == rm && !src2_wide) {
6746 tmp = neon_load_reg(rm, 1);
6747 neon_store_scratch(2, tmp);
6748 } else if (rd == rn && !src1_wide) {
6749 tmp = neon_load_reg(rn, 1);
6750 neon_store_scratch(2, tmp);
6753 for (pass = 0; pass < 2; pass++) {
6755 neon_load_reg64(cpu_V0, rn + pass);
6758 if (pass == 1 && rd == rn) {
6759 tmp = neon_load_scratch(2);
6761 tmp = neon_load_reg(rn, pass);
6764 gen_neon_widen(cpu_V0, tmp, size, u);
6768 neon_load_reg64(cpu_V1, rm + pass);
6771 if (pass == 1 && rd == rm) {
6772 tmp2 = neon_load_scratch(2);
6774 tmp2 = neon_load_reg(rm, pass);
6777 gen_neon_widen(cpu_V1, tmp2, size, u);
6781 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6782 gen_neon_addl(size);
6784 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6785 gen_neon_subl(size);
6787 case 5: case 7: /* VABAL, VABDL */
6788 switch ((size << 1) | u) {
6790 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6793 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6796 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6799 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6802 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6805 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6809 tcg_temp_free_i32(tmp2);
6810 tcg_temp_free_i32(tmp);
6812 case 8: case 9: case 10: case 11: case 12: case 13:
6813 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6814 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6816 case 14: /* Polynomial VMULL */
6817 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6818 tcg_temp_free_i32(tmp2);
6819 tcg_temp_free_i32(tmp);
6821 default: /* 15 is RESERVED: caught earlier */
6826 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6827 neon_store_reg64(cpu_V0, rd + pass);
6828 } else if (op == 5 || (op >= 8 && op <= 11)) {
6830 neon_load_reg64(cpu_V1, rd + pass);
6832 case 10: /* VMLSL */
6833 gen_neon_negl(cpu_V0, size);
6835 case 5: case 8: /* VABAL, VMLAL */
6836 gen_neon_addl(size);
6838 case 9: case 11: /* VQDMLAL, VQDMLSL */
6839 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6841 gen_neon_negl(cpu_V0, size);
6843 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6848 neon_store_reg64(cpu_V0, rd + pass);
6849 } else if (op == 4 || op == 6) {
6850 /* Narrowing operation. */
6851 tmp = tcg_temp_new_i32();
6855 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6858 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6861 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6862 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6869 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6872 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6875 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6876 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6877 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6885 neon_store_reg(rd, 0, tmp3);
6886 neon_store_reg(rd, 1, tmp);
6889 /* Write back the result. */
6890 neon_store_reg64(cpu_V0, rd + pass);
6894 /* Two registers and a scalar. NB that for ops of this form
6895 * the ARM ARM labels bit 24 as Q, but it is in our variable
6902 case 1: /* Float VMLA scalar */
6903 case 5: /* Float VMLS scalar */
6904 case 9: /* Float VMUL scalar */
6909 case 0: /* Integer VMLA scalar */
6910 case 4: /* Integer VMLS scalar */
6911 case 8: /* Integer VMUL scalar */
6912 case 12: /* VQDMULH scalar */
6913 case 13: /* VQRDMULH scalar */
6914 if (u && ((rd | rn) & 1)) {
6917 tmp = neon_get_scalar(size, rm);
6918 neon_store_scratch(0, tmp);
6919 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6920 tmp = neon_load_scratch(0);
6921 tmp2 = neon_load_reg(rn, pass);
6924 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6926 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6928 } else if (op == 13) {
6930 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6932 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6934 } else if (op & 1) {
6935 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6936 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6937 tcg_temp_free_ptr(fpstatus);
6940 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6941 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6942 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6946 tcg_temp_free_i32(tmp2);
6949 tmp2 = neon_load_reg(rd, pass);
6952 gen_neon_add(size, tmp, tmp2);
6956 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6957 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6958 tcg_temp_free_ptr(fpstatus);
6962 gen_neon_rsb(size, tmp, tmp2);
6966 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6967 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6968 tcg_temp_free_ptr(fpstatus);
6974 tcg_temp_free_i32(tmp2);
6976 neon_store_reg(rd, pass, tmp);
6979 case 3: /* VQDMLAL scalar */
6980 case 7: /* VQDMLSL scalar */
6981 case 11: /* VQDMULL scalar */
6986 case 2: /* VMLAL scalar */
6987 case 6: /* VMLSL scalar */
6988 case 10: /* VMULL scalar */
6992 tmp2 = neon_get_scalar(size, rm);
6993 /* We need a copy of tmp2 because gen_neon_mull
6994 * frees it during pass 0. */
6995 tmp4 = tcg_temp_new_i32();
6996 tcg_gen_mov_i32(tmp4, tmp2);
6997 tmp3 = neon_load_reg(rn, 1);
6999 for (pass = 0; pass < 2; pass++) {
7001 tmp = neon_load_reg(rn, 0);
7006 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
7008 neon_load_reg64(cpu_V1, rd + pass);
7012 gen_neon_negl(cpu_V0, size);
7015 gen_neon_addl(size);
7018 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7020 gen_neon_negl(cpu_V0, size);
7022 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7028 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7033 neon_store_reg64(cpu_V0, rd + pass);
7036 case 14: /* VQRDMLAH scalar */
7037 case 15: /* VQRDMLSH scalar */
7039 NeonGenThreeOpEnvFn *fn;
7041 if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
7044 if (u && ((rd | rn) & 1)) {
7049 fn = gen_helper_neon_qrdmlah_s16;
7051 fn = gen_helper_neon_qrdmlah_s32;
7055 fn = gen_helper_neon_qrdmlsh_s16;
7057 fn = gen_helper_neon_qrdmlsh_s32;
7061 tmp2 = neon_get_scalar(size, rm);
7062 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7063 tmp = neon_load_reg(rn, pass);
7064 tmp3 = neon_load_reg(rd, pass);
7065 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7066 tcg_temp_free_i32(tmp3);
7067 neon_store_reg(rd, pass, tmp);
7069 tcg_temp_free_i32(tmp2);
7073 g_assert_not_reached();
7076 } else { /* size == 3 */
7079 imm = (insn >> 8) & 0xf;
7084 if (q && ((rd | rn | rm) & 1)) {
7089 neon_load_reg64(cpu_V0, rn);
7091 neon_load_reg64(cpu_V1, rn + 1);
7093 } else if (imm == 8) {
7094 neon_load_reg64(cpu_V0, rn + 1);
7096 neon_load_reg64(cpu_V1, rm);
7099 tmp64 = tcg_temp_new_i64();
7101 neon_load_reg64(cpu_V0, rn);
7102 neon_load_reg64(tmp64, rn + 1);
7104 neon_load_reg64(cpu_V0, rn + 1);
7105 neon_load_reg64(tmp64, rm);
7107 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
7108 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
7109 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7111 neon_load_reg64(cpu_V1, rm);
7113 neon_load_reg64(cpu_V1, rm + 1);
7116 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7117 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7118 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
7119 tcg_temp_free_i64(tmp64);
7122 neon_load_reg64(cpu_V0, rn);
7123 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
7124 neon_load_reg64(cpu_V1, rm);
7125 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7126 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
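/* Editor's example: for a 64-bit VEXT with imm == 3 this computes
 * (Vm:Vn) >> 24 -- the low five bytes of the result come from bytes 7..3
 * of Vn and the top three bytes from bytes 2..0 of Vm.
 */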
7128 neon_store_reg64(cpu_V0, rd);
7130 neon_store_reg64(cpu_V1, rd + 1);
7132 } else if ((insn & (1 << 11)) == 0) {
7133 /* Two register misc. */
7134 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
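/* Editor's example: op == 32 == 0b100000 corresponds to bits [17:16] == 0b10
 * and bits [10:7] == 0b0000, which is NEON_2RM_VSWP in the tables above.
 */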
7135 size = (insn >> 18) & 3;
7136 /* UNDEF for unknown op values and bad op-size combinations */
7137 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7140 if (neon_2rm_is_v8_op(op) &&
7141 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7144 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7145 q && ((rm | rd) & 1)) {
7149 case NEON_2RM_VREV64:
7150 for (pass = 0; pass < (q ? 2 : 1); pass++) {
7151 tmp = neon_load_reg(rm, pass * 2);
7152 tmp2 = neon_load_reg(rm, pass * 2 + 1);
7154 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7155 case 1: gen_swap_half(tmp); break;
7156 case 2: /* no-op */ break;
7159 neon_store_reg(rd, pass * 2 + 1, tmp);
7161 neon_store_reg(rd, pass * 2, tmp2);
7164 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7165 case 1: gen_swap_half(tmp2); break;
7168 neon_store_reg(rd, pass * 2, tmp2);
7172 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7173 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
7174 for (pass = 0; pass < q + 1; pass++) {
7175 tmp = neon_load_reg(rm, pass * 2);
7176 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7177 tmp = neon_load_reg(rm, pass * 2 + 1);
7178 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7180 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7181 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7182 case 2: tcg_gen_add_i64(CPU_V001); break;
7185 if (op >= NEON_2RM_VPADAL) {
7187 neon_load_reg64(cpu_V1, rd + pass);
7188 gen_neon_addl(size);
7190 neon_store_reg64(cpu_V0, rd + pass);
7196 for (n = 0; n < (q ? 4 : 2); n += 2) {
7197 tmp = neon_load_reg(rm, n);
7198 tmp2 = neon_load_reg(rd, n + 1);
7199 neon_store_reg(rm, n, tmp2);
7200 neon_store_reg(rd, n + 1, tmp);
7207 if (gen_neon_unzip(rd, rm, size, q)) {
7212 if (gen_neon_zip(rd, rm, size, q)) {
7216 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7217 /* also VQMOVUN; op field and mnemonics don't line up */
7222 for (pass = 0; pass < 2; pass++) {
7223 neon_load_reg64(cpu_V0, rm + pass);
7224 tmp = tcg_temp_new_i32();
7225 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7230 neon_store_reg(rd, 0, tmp2);
7231 neon_store_reg(rd, 1, tmp);
7235 case NEON_2RM_VSHLL:
7236 if (q || (rd & 1)) {
7239 tmp = neon_load_reg(rm, 0);
7240 tmp2 = neon_load_reg(rm, 1);
7241 for (pass = 0; pass < 2; pass++) {
7244 gen_neon_widen(cpu_V0, tmp, size, 1);
7245 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
7246 neon_store_reg64(cpu_V0, rd + pass);
7249 case NEON_2RM_VCVT_F16_F32:
7254 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7258 tmp = tcg_temp_new_i32();
7259 tmp2 = tcg_temp_new_i32();
7260 fpst = get_fpstatus_ptr(true);
7261 ahp = get_ahp_flag();
7262 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
7263 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7264 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
7265 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7266 tcg_gen_shli_i32(tmp2, tmp2, 16);
7267 tcg_gen_or_i32(tmp2, tmp2, tmp);
7268 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
7269 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7270 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7271 neon_store_reg(rd, 0, tmp2);
7272 tmp2 = tcg_temp_new_i32();
7273 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7274 tcg_gen_shli_i32(tmp2, tmp2, 16);
7275 tcg_gen_or_i32(tmp2, tmp2, tmp);
7276 neon_store_reg(rd, 1, tmp2);
7277 tcg_temp_free_i32(tmp);
7278 tcg_temp_free_i32(ahp);
7279 tcg_temp_free_ptr(fpst);
7282 case NEON_2RM_VCVT_F32_F16:
7286 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7290 fpst = get_fpstatus_ptr(true);
7291 ahp = get_ahp_flag();
7292 tmp3 = tcg_temp_new_i32();
7293 tmp = neon_load_reg(rm, 0);
7294 tmp2 = neon_load_reg(rm, 1);
7295 tcg_gen_ext16u_i32(tmp3, tmp);
7296 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7297 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7298 tcg_gen_shri_i32(tmp3, tmp, 16);
7299 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7300 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7301 tcg_temp_free_i32(tmp);
7302 tcg_gen_ext16u_i32(tmp3, tmp2);
7303 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7304 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7305 tcg_gen_shri_i32(tmp3, tmp2, 16);
7306 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7307 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7308 tcg_temp_free_i32(tmp2);
7309 tcg_temp_free_i32(tmp3);
7310 tcg_temp_free_i32(ahp);
7311 tcg_temp_free_ptr(fpst);
7314 case NEON_2RM_AESE: case NEON_2RM_AESMC:
7315 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
7316 || ((rm | rd) & 1)) {
7319 ptr1 = vfp_reg_ptr(true, rd);
7320 ptr2 = vfp_reg_ptr(true, rm);
7322 /* Bit 6 is the lowest opcode bit; it distinguishes between
7323 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7325 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7327 if (op == NEON_2RM_AESE) {
7328 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
7330 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
7332 tcg_temp_free_ptr(ptr1);
7333 tcg_temp_free_ptr(ptr2);
7334 tcg_temp_free_i32(tmp3);
7336 case NEON_2RM_SHA1H:
7337 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
7338 || ((rm | rd) & 1)) {
7341 ptr1 = vfp_reg_ptr(true, rd);
7342 ptr2 = vfp_reg_ptr(true, rm);
7344 gen_helper_crypto_sha1h(ptr1, ptr2);
7346 tcg_temp_free_ptr(ptr1);
7347 tcg_temp_free_ptr(ptr2);
7349 case NEON_2RM_SHA1SU1:
7350 if ((rm | rd) & 1) {
7353 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7355 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
7358 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
7361 ptr1 = vfp_reg_ptr(true, rd);
7362 ptr2 = vfp_reg_ptr(true, rm);
7364 gen_helper_crypto_sha256su0(ptr1, ptr2);
7366 gen_helper_crypto_sha1su1(ptr1, ptr2);
7368 tcg_temp_free_ptr(ptr1);
7369 tcg_temp_free_ptr(ptr2);
7373 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7374 if (neon_2rm_is_float_op(op)) {
7375 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7376 neon_reg_offset(rm, pass));
7379 tmp = neon_load_reg(rm, pass);
7382 case NEON_2RM_VREV32:
7384 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7385 case 1: gen_swap_half(tmp); break;
7389 case NEON_2RM_VREV16:
7394 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7395 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7396 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7402 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7403 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7404 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
7409 gen_helper_neon_cnt_u8(tmp, tmp);
7412 tcg_gen_not_i32(tmp, tmp);
7414 case NEON_2RM_VQABS:
7417 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7420 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7423 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7428 case NEON_2RM_VQNEG:
7431 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7434 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7437 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7442 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7443 tmp2 = tcg_const_i32(0);
7445 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7446 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7447 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7450 tcg_temp_free_i32(tmp2);
7451 if (op == NEON_2RM_VCLE0) {
7452 tcg_gen_not_i32(tmp, tmp);
7455 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7456 tmp2 = tcg_const_i32(0);
7458 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7459 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7460 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7463 tcg_temp_free_i32(tmp2);
7464 if (op == NEON_2RM_VCLT0) {
7465 tcg_gen_not_i32(tmp, tmp);
7468 case NEON_2RM_VCEQ0:
7469 tmp2 = tcg_const_i32(0);
7471 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7472 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7473 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7476 tcg_temp_free_i32(tmp2);
7480 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7481 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7482 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7487 tmp2 = tcg_const_i32(0);
7488 gen_neon_rsb(size, tmp, tmp2);
7489 tcg_temp_free_i32(tmp2);
7491 case NEON_2RM_VCGT0_F:
7493 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7494 tmp2 = tcg_const_i32(0);
7495 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7496 tcg_temp_free_i32(tmp2);
7497 tcg_temp_free_ptr(fpstatus);
7500 case NEON_2RM_VCGE0_F:
7502 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7503 tmp2 = tcg_const_i32(0);
7504 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7505 tcg_temp_free_i32(tmp2);
7506 tcg_temp_free_ptr(fpstatus);
7509 case NEON_2RM_VCEQ0_F:
7511 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7512 tmp2 = tcg_const_i32(0);
7513 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
7514 tcg_temp_free_i32(tmp2);
7515 tcg_temp_free_ptr(fpstatus);
7518 case NEON_2RM_VCLE0_F:
7520 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7521 tmp2 = tcg_const_i32(0);
7522 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
7523 tcg_temp_free_i32(tmp2);
7524 tcg_temp_free_ptr(fpstatus);
7527 case NEON_2RM_VCLT0_F:
7529 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7530 tmp2 = tcg_const_i32(0);
7531 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
7532 tcg_temp_free_i32(tmp2);
7533 tcg_temp_free_ptr(fpstatus);
7536 case NEON_2RM_VABS_F:
7539 case NEON_2RM_VNEG_F:
7543 tmp2 = neon_load_reg(rd, pass);
7544 neon_store_reg(rm, pass, tmp2);
7547 tmp2 = neon_load_reg(rd, pass);
7549 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7550 case 1: gen_neon_trn_u16(tmp, tmp2); break;
7553 neon_store_reg(rm, pass, tmp2);
7555 case NEON_2RM_VRINTN:
7556 case NEON_2RM_VRINTA:
7557 case NEON_2RM_VRINTM:
7558 case NEON_2RM_VRINTP:
7559 case NEON_2RM_VRINTZ:
7562 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7565 if (op == NEON_2RM_VRINTZ) {
7566 rmode = FPROUNDING_ZERO;
7568 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7571 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7572 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7574 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7575 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7577 tcg_temp_free_ptr(fpstatus);
7578 tcg_temp_free_i32(tcg_rmode);
7581 case NEON_2RM_VRINTX:
7583 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7584 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7585 tcg_temp_free_ptr(fpstatus);
7588 case NEON_2RM_VCVTAU:
7589 case NEON_2RM_VCVTAS:
7590 case NEON_2RM_VCVTNU:
7591 case NEON_2RM_VCVTNS:
7592 case NEON_2RM_VCVTPU:
7593 case NEON_2RM_VCVTPS:
7594 case NEON_2RM_VCVTMU:
7595 case NEON_2RM_VCVTMS:
7597 bool is_signed = !extract32(insn, 7, 1);
7598 TCGv_ptr fpst = get_fpstatus_ptr(1);
7599 TCGv_i32 tcg_rmode, tcg_shift;
7600 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7602 tcg_shift = tcg_const_i32(0);
7603 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7604 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7608 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7611 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7615 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7617 tcg_temp_free_i32(tcg_rmode);
7618 tcg_temp_free_i32(tcg_shift);
7619 tcg_temp_free_ptr(fpst);
7622 case NEON_2RM_VRECPE:
7624 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7625 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7626 tcg_temp_free_ptr(fpstatus);
7629 case NEON_2RM_VRSQRTE:
7631 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7632 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7633 tcg_temp_free_ptr(fpstatus);
7636 case NEON_2RM_VRECPE_F:
7638 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7639 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7640 tcg_temp_free_ptr(fpstatus);
7643 case NEON_2RM_VRSQRTE_F:
7645 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7646 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7647 tcg_temp_free_ptr(fpstatus);
7650 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7653 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7656 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7657 gen_vfp_tosiz(0, 1);
7659 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7660 gen_vfp_touiz(0, 1);
7663 /* Reserved op values were caught by the
7664 * neon_2rm_sizes[] check earlier.
7668 if (neon_2rm_is_float_op(op)) {
7669 tcg_gen_st_f32(cpu_F0s, cpu_env,
7670 neon_reg_offset(rd, pass));
7672 neon_store_reg(rd, pass, tmp);
7677 } else if ((insn & (1 << 10)) == 0) {
7679 int n = ((insn >> 8) & 3) + 1;
7680 if ((rn + n) > 32) {
7681 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7682 * helper function running off the end of the register file.
7687 if (insn & (1 << 6)) {
7688 tmp = neon_load_reg(rd, 0);
7690 tmp = tcg_temp_new_i32();
7691 tcg_gen_movi_i32(tmp, 0);
7693 tmp2 = neon_load_reg(rm, 0);
7694 ptr1 = vfp_reg_ptr(true, rn);
7695 tmp5 = tcg_const_i32(n);
7696 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7697 tcg_temp_free_i32(tmp);
7698 if (insn & (1 << 6)) {
7699 tmp = neon_load_reg(rd, 1);
7701 tmp = tcg_temp_new_i32();
7702 tcg_gen_movi_i32(tmp, 0);
7704 tmp3 = neon_load_reg(rm, 1);
7705 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
7706 tcg_temp_free_i32(tmp5);
7707 tcg_temp_free_ptr(ptr1);
7708 neon_store_reg(rd, 0, tmp2);
7709 neon_store_reg(rd, 1, tmp3);
7710 tcg_temp_free_i32(tmp);
7711 } else if ((insn & 0x380) == 0) {
7713 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7716 if (insn & (1 << 19)) {
7717 tmp = neon_load_reg(rm, 1);
7719 tmp = neon_load_reg(rm, 0);
7721 if (insn & (1 << 16)) {
7722 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7723 } else if (insn & (1 << 17)) {
7724 if ((insn >> 18) & 1)
7725 gen_neon_dup_high16(tmp);
7727 gen_neon_dup_low16(tmp);
7729 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7730 tmp2 = tcg_temp_new_i32();
7731 tcg_gen_mov_i32(tmp2, tmp);
7732 neon_store_reg(rd, pass, tmp2);
7734 tcg_temp_free_i32(tmp);
7743 /* Advanced SIMD three registers of the same length extension.
7744 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7745 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7746 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7747 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7749 static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7751 gen_helper_gvec_3_ptr *fn_gvec_ptr;
7752 int rd, rn, rm, rot, size, opr_sz;
7756 q = extract32(insn, 6, 1);
7757 VFP_DREG_D(rd, insn);
7758 VFP_DREG_N(rn, insn);
7759 VFP_DREG_M(rm, insn);
7760 if ((rd | rn | rm) & q) {
7764 if ((insn & 0xfe200f10) == 0xfc200800) {
7765 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
7766 size = extract32(insn, 20, 1);
7767 rot = extract32(insn, 23, 2);
7768 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7769 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7772 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7773 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7774 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
7775 size = extract32(insn, 20, 1);
7776 rot = extract32(insn, 24, 1);
7777 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7778 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7781 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
7786 if (s->fp_excp_el) {
7787 gen_exception_insn(s, 4, EXCP_UDEF,
7788 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7791 if (!s->vfp_enabled) {
7795 opr_sz = (1 + q) * 8;
7796 fpst = get_fpstatus_ptr(1);
7797 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7798 vfp_reg_offset(1, rn),
7799 vfp_reg_offset(1, rm), fpst,
7800 opr_sz, opr_sz, rot, fn_gvec_ptr);
7801 tcg_temp_free_ptr(fpst);
7805 /* Advanced SIMD two registers and a scalar extension.
7806 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7807 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7808 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7809 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
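* For the indexed VCMLA the rotation comes from bits [21:20] and the element
* size from bit 23; as above, the fp16 form also requires the FP16 extension.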
7813 static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7815 int rd, rn, rm, rot, size, opr_sz;
7819 q = extract32(insn, 6, 1);
7820 VFP_DREG_D(rd, insn);
7821 VFP_DREG_N(rn, insn);
7822 VFP_DREG_M(rm, insn);
7823 if ((rd | rn) & q) {
7827 if ((insn & 0xff000f10) == 0xfe000800) {
7828 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
7829 rot = extract32(insn, 20, 2);
7830 size = extract32(insn, 23, 1);
7831 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7832 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7839 if (s->fp_excp_el) {
7840 gen_exception_insn(s, 4, EXCP_UDEF,
7841 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7844 if (!s->vfp_enabled) {
7848 opr_sz = (1 + q) * 8;
7849 fpst = get_fpstatus_ptr(1);
7850 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7851 vfp_reg_offset(1, rn),
7852 vfp_reg_offset(1, rm), fpst,
7853 opr_sz, opr_sz, rot,
7854 size ? gen_helper_gvec_fcmlas_idx
7855 : gen_helper_gvec_fcmlah_idx);
7856 tcg_temp_free_ptr(fpst);
7860 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7862 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7863 const ARMCPRegInfo *ri;
7865 cpnum = (insn >> 8) & 0xf;
7867 /* First check for coprocessor space used for XScale/iwMMXt insns */
7868 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7869 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7872 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7873 return disas_iwmmxt_insn(s, insn);
7874 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7875 return disas_dsp_insn(s, insn);
7880 /* Otherwise treat as a generic register access */
7881 is64 = (insn & (1 << 25)) == 0;
7882 if (!is64 && ((insn & (1 << 4)) == 0)) {
7890 opc1 = (insn >> 4) & 0xf;
7892 rt2 = (insn >> 16) & 0xf;
7894 crn = (insn >> 16) & 0xf;
7895 opc1 = (insn >> 21) & 7;
7896 opc2 = (insn >> 5) & 7;
7899 isread = (insn >> 20) & 1;
7900 rt = (insn >> 12) & 0xf;
7902 ri = get_arm_cp_reginfo(s->cp_regs,
7903 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
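/* The lookup key packs (cp, 64-bit flag, security bank, crn, crm, opc1,
 * opc2) into a single value; a NULL result means we do not know this
 * register and we fall through to the "unknown register" path below.
 */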
7905 /* Check access permissions */
7906 if (!cp_access_ok(s->current_el, ri, isread)) {
7911 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7912 /* Emit code to perform further access permissions checks at
7913 * runtime; this may result in an exception.
7914 * Note that on XScale all cp0..cp13 registers do an access check
7915 * call in order to handle c15_cpar.
7918 TCGv_i32 tcg_syn, tcg_isread;
7921 /* Note that since we are an implementation which takes an
7922 * exception on a trapped conditional instruction only if the
7923 * instruction passes its condition code check, we can take
7924 * advantage of the clause in the ARM ARM that allows us to set
7925 * the COND field in the instruction to 0xE in all cases.
7926 * We could fish the actual condition out of the insn (ARM)
7927 * or the condexec bits (Thumb) but it isn't necessary.
7932 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7935 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7941 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7944 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7949 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7950 * so this can only happen if this is an ARMv7 or earlier CPU,
7951 * in which case the syndrome information won't actually be
7954 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7955 syndrome = syn_uncategorized();
7959 gen_set_condexec(s);
7960 gen_set_pc_im(s, s->pc - 4);
7961 tmpptr = tcg_const_ptr(ri);
7962 tcg_syn = tcg_const_i32(syndrome);
7963 tcg_isread = tcg_const_i32(isread);
7964 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7966 tcg_temp_free_ptr(tmpptr);
7967 tcg_temp_free_i32(tcg_syn);
7968 tcg_temp_free_i32(tcg_isread);
7971 /* Handle special cases first */
7972 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7979 gen_set_pc_im(s, s->pc);
7980 s->base.is_jmp = DISAS_WFI;
7986 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7995 if (ri->type & ARM_CP_CONST) {
7996 tmp64 = tcg_const_i64(ri->resetvalue);
7997 } else if (ri->readfn) {
7999 tmp64 = tcg_temp_new_i64();
8000 tmpptr = tcg_const_ptr(ri);
8001 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8002 tcg_temp_free_ptr(tmpptr);
8004 tmp64 = tcg_temp_new_i64();
8005 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8007 tmp = tcg_temp_new_i32();
8008 tcg_gen_extrl_i64_i32(tmp, tmp64);
8009 store_reg(s, rt, tmp);
8010 tcg_gen_shri_i64(tmp64, tmp64, 32);
8011 tmp = tcg_temp_new_i32();
8012 tcg_gen_extrl_i64_i32(tmp, tmp64);
8013 tcg_temp_free_i64(tmp64);
8014 store_reg(s, rt2, tmp);
8017 if (ri->type & ARM_CP_CONST) {
8018 tmp = tcg_const_i32(ri->resetvalue);
8019 } else if (ri->readfn) {
8021 tmp = tcg_temp_new_i32();
8022 tmpptr = tcg_const_ptr(ri);
8023 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8024 tcg_temp_free_ptr(tmpptr);
8026 tmp = load_cpu_offset(ri->fieldoffset);
8029 /* Destination register of r15 for 32 bit loads sets
8030 * the condition codes from the high 4 bits of the value
8033 tcg_temp_free_i32(tmp);
8035 store_reg(s, rt, tmp);
8040 if (ri->type & ARM_CP_CONST) {
8041 /* If not forbidden by access permissions, treat as WI */
8046 TCGv_i32 tmplo, tmphi;
8047 TCGv_i64 tmp64 = tcg_temp_new_i64();
8048 tmplo = load_reg(s, rt);
8049 tmphi = load_reg(s, rt2);
8050 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8051 tcg_temp_free_i32(tmplo);
8052 tcg_temp_free_i32(tmphi);
8054 TCGv_ptr tmpptr = tcg_const_ptr(ri);
8055 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8056 tcg_temp_free_ptr(tmpptr);
8058 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8060 tcg_temp_free_i64(tmp64);
8065 tmp = load_reg(s, rt);
8066 tmpptr = tcg_const_ptr(ri);
8067 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8068 tcg_temp_free_ptr(tmpptr);
8069 tcg_temp_free_i32(tmp);
8071 TCGv_i32 tmp = load_reg(s, rt);
8072 store_cpu_offset(tmp, ri->fieldoffset);
8077 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
8078 /* I/O operations must end the TB here (whether read or write) */
8081 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
8082 /* We default to ending the TB on a coprocessor register write,
8083 * but allow this to be suppressed by the register definition
8084 * (usually only necessary to work around guest bugs).
8092 /* Unknown register; this might be a guest error or a QEMU
8093 * unimplemented feature.
8096 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8097 "64 bit system register cp:%d opc1: %d crm:%d "
8099 isread ? "read" : "write", cpnum, opc1, crm,
8100 s->ns ? "non-secure" : "secure");
8102 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8103 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8105 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8106 s->ns ? "non-secure" : "secure");
8113 /* Store a 64-bit value to a register pair. Clobbers val. */
8114 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
8117 tmp = tcg_temp_new_i32();
8118 tcg_gen_extrl_i64_i32(tmp, val);
8119 store_reg(s, rlow, tmp);
8120 tmp = tcg_temp_new_i32();
8121 tcg_gen_shri_i64(val, val, 32);
8122 tcg_gen_extrl_i64_i32(tmp, val);
8123 store_reg(s, rhigh, tmp);
8126 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
8127 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
8132 /* Load value and extend to 64 bits. */
8133 tmp = tcg_temp_new_i64();
8134 tmp2 = load_reg(s, rlow);
8135 tcg_gen_extu_i32_i64(tmp, tmp2);
8136 tcg_temp_free_i32(tmp2);
8137 tcg_gen_add_i64(val, val, tmp);
8138 tcg_temp_free_i64(tmp);
8141 /* load and add a 64-bit value from a register pair. */
8142 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
8148 /* Load 64-bit value rd:rn. */
8149 tmpl = load_reg(s, rlow);
8150 tmph = load_reg(s, rhigh);
8151 tmp = tcg_temp_new_i64();
8152 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
8153 tcg_temp_free_i32(tmpl);
8154 tcg_temp_free_i32(tmph);
8155 tcg_gen_add_i64(val, val, tmp);
8156 tcg_temp_free_i64(tmp);
8159 /* Set N and Z flags from hi|lo. */
8160 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
8162 tcg_gen_mov_i32(cpu_NF, hi);
8163 tcg_gen_or_i32(cpu_ZF, lo, hi);
8166 /* Load/Store exclusive instructions are implemented by remembering
8167 the value/address loaded, and seeing if these are the same
8168 when the store is performed. This should be sufficient to implement
8169 the architecturally mandated semantics, and avoids having to monitor
8170 regular stores. The compare vs the remembered value is done during
8171 the cmpxchg operation, but we must compare the addresses manually. */
8172 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
8173 TCGv_i32 addr, int size)
8175 TCGv_i32 tmp = tcg_temp_new_i32();
8176 TCGMemOp opc = size | MO_ALIGN | s->be_data;
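/* 'size' is log2 of the access width (0 = byte, 1 = halfword, 2 = word,
 * 3 = the 64-bit LDREXD pair); MO_ALIGN makes the access fault if the
 * address is not naturally aligned, as the architecture requires for
 * exclusives.
 */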
8181 TCGv_i32 tmp2 = tcg_temp_new_i32();
8182 TCGv_i64 t64 = tcg_temp_new_i64();
8184 /* For AArch32, architecturally the 32-bit word at the lowest
8185 * address is always Rt and the one at addr+4 is Rt2, even if
8186 * the CPU is big-endian. That means we don't want to do a
8187 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8188 * for an architecturally 64-bit access, but instead do a
8189 * 64-bit access using MO_BE if appropriate and then split
8191 * This only makes a difference for BE32 user-mode, where
8192 * frob64() must not flip the two halves of the 64-bit data
8193 * but this code must treat BE32 user-mode like BE32 system.
8195 TCGv taddr = gen_aa32_addr(s, addr, opc);
8197 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8198 tcg_temp_free(taddr);
8199 tcg_gen_mov_i64(cpu_exclusive_val, t64);
8200 if (s->be_data == MO_BE) {
8201 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8203 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8205 tcg_temp_free_i64(t64);
8207 store_reg(s, rt2, tmp2);
8209 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
8210 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
8213 store_reg(s, rt, tmp);
8214 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
8217 static void gen_clrex(DisasContext *s)
8219 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
8222 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
8223 TCGv_i32 addr, int size)
8225 TCGv_i32 t0, t1, t2;
8228 TCGLabel *done_label;
8229 TCGLabel *fail_label;
8230 TCGMemOp opc = size | MO_ALIGN | s->be_data;
8232 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8238 fail_label = gen_new_label();
8239 done_label = gen_new_label();
8240 extaddr = tcg_temp_new_i64();
8241 tcg_gen_extu_i32_i64(extaddr, addr);
8242 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8243 tcg_temp_free_i64(extaddr);
8245 taddr = gen_aa32_addr(s, addr, opc);
8246 t0 = tcg_temp_new_i32();
8247 t1 = load_reg(s, rt);
8249 TCGv_i64 o64 = tcg_temp_new_i64();
8250 TCGv_i64 n64 = tcg_temp_new_i64();
8252 t2 = load_reg(s, rt2);
8253 /* For AArch32, architecturally the 32-bit word at the lowest
8254 * address is always Rt and the one at addr+4 is Rt2, even if
8255 * the CPU is big-endian. Since we're going to treat this as a
8256 * single 64-bit BE store, we need to put the two halves in the
8257 * opposite order for BE to LE, so that they end up in the right
8259 * We don't want gen_aa32_frob64() because that does the wrong
8260 * thing for BE32 usermode.
8262 if (s->be_data == MO_BE) {
8263 tcg_gen_concat_i32_i64(n64, t2, t1);
8265 tcg_gen_concat_i32_i64(n64, t1, t2);
8267 tcg_temp_free_i32(t2);
8269 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8270 get_mem_index(s), opc);
8271 tcg_temp_free_i64(n64);
8273 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8274 tcg_gen_extrl_i64_i32(t0, o64);
8276 tcg_temp_free_i64(o64);
8278 t2 = tcg_temp_new_i32();
8279 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8280 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8281 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8282 tcg_temp_free_i32(t2);
8284 tcg_temp_free_i32(t1);
8285 tcg_temp_free(taddr);
8286 tcg_gen_mov_i32(cpu_R[rd], t0);
8287 tcg_temp_free_i32(t0);
8288 tcg_gen_br(done_label);
8290 gen_set_label(fail_label);
8291 tcg_gen_movi_i32(cpu_R[rd], 1);
8292 gen_set_label(done_label);
8293 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
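/* Success or failure, a store-exclusive always clears the local monitor;
 * -1 marks "no outstanding exclusive access", so a later STREX without a
 * fresh LDREX cannot match.
 */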
8299 * @mode: mode field from insn (which stack to store to)
8300 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8301 * @writeback: true if writeback bit set
8303 * Generate code for the SRS (Store Return State) insn.
8305 static void gen_srs(DisasContext *s,
8306 uint32_t mode, uint32_t amode, bool writeback)
8313 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
8314 * and specified mode is monitor mode
8315 * - UNDEFINED in Hyp mode
8316 * - UNPREDICTABLE in User or System mode
8317 * - UNPREDICTABLE if the specified mode is:
8318 * -- not implemented
8319 * -- not a valid mode number
8320 * -- a mode that's at a higher exception level
8321 * -- Monitor, if we are Non-secure
8322 * For the UNPREDICTABLE cases we choose to UNDEF.
8324 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
8325 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8329 if (s->current_el == 0 || s->current_el == 2) {
8334 case ARM_CPU_MODE_USR:
8335 case ARM_CPU_MODE_FIQ:
8336 case ARM_CPU_MODE_IRQ:
8337 case ARM_CPU_MODE_SVC:
8338 case ARM_CPU_MODE_ABT:
8339 case ARM_CPU_MODE_UND:
8340 case ARM_CPU_MODE_SYS:
8342 case ARM_CPU_MODE_HYP:
8343 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8347 case ARM_CPU_MODE_MON:
8348 /* No need to check specifically for "are we non-secure" because
8349 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8350 * so if this isn't EL3 then we must be non-secure.
8352 if (s->current_el != 3) {
8361 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8362 default_exception_el(s));
8366 addr = tcg_temp_new_i32();
8367 tmp = tcg_const_i32(mode);
8368 /* get_r13_banked() will raise an exception if called from System mode */
8369 gen_set_condexec(s);
8370 gen_set_pc_im(s, s->pc - 4);
8371 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8372 tcg_temp_free_i32(tmp);
8389 tcg_gen_addi_i32(addr, addr, offset);
8390 tmp = load_reg(s, 14);
8391 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8392 tcg_temp_free_i32(tmp);
8393 tmp = load_cpu_field(spsr);
8394 tcg_gen_addi_i32(addr, addr, 4);
8395 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8396 tcg_temp_free_i32(tmp);
8414 tcg_gen_addi_i32(addr, addr, offset);
8415 tmp = tcg_const_i32(mode);
8416 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8417 tcg_temp_free_i32(tmp);
8419 tcg_temp_free_i32(addr);
8420 s->base.is_jmp = DISAS_UPDATE;
8423 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8425 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
8432 /* M variants do not implement ARM mode; this must raise the INVSTATE
8433 * UsageFault exception.
8435 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8436 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8437 default_exception_el(s));
8442 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8443 * choose to UNDEF. In ARMv5 and above the space is used
8444 * for miscellaneous unconditional instructions.
8448 /* Unconditional instructions. */
8449 if (((insn >> 25) & 7) == 1) {
8450 /* NEON Data processing. */
8451 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8455 if (disas_neon_data_insn(s, insn)) {
8460 if ((insn & 0x0f100000) == 0x04000000) {
8461 /* NEON load/store. */
8462 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8466 if (disas_neon_ls_insn(s, insn)) {
8471 if ((insn & 0x0f000e10) == 0x0e000a00) {
8473 if (disas_vfp_insn(s, insn)) {
8478 if (((insn & 0x0f30f000) == 0x0510f000) ||
8479 ((insn & 0x0f30f010) == 0x0710f000)) {
8480 if ((insn & (1 << 22)) == 0) {
8482 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8486 /* Otherwise PLD; v5TE+ */
8490 if (((insn & 0x0f70f000) == 0x0450f000) ||
8491 ((insn & 0x0f70f010) == 0x0650f000)) {
8493 return; /* PLI; V7 */
8495 if (((insn & 0x0f700000) == 0x04100000) ||
8496 ((insn & 0x0f700010) == 0x06100000)) {
8497 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8500 return; /* v7MP: Unallocated memory hint: must NOP */
8503 if ((insn & 0x0ffffdff) == 0x01010000) {
8506 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8507 gen_helper_setend(cpu_env);
8508 s->base.is_jmp = DISAS_UPDATE;
8511 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8512 switch ((insn >> 4) & 0xf) {
8520 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
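/* DSB and DMB both become a full sequentially-consistent barrier: the
 * strongest thing TCG can emit, which safely over-approximates whatever
 * option is encoded in the low four bits of the insn.
 */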
8523 /* We need to break the TB after this insn to execute
8524 * self-modifying code correctly and also to take
8525 * any pending interrupts immediately.
8527 gen_goto_tb(s, 0, s->pc & ~1);
8532 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8535 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
8537 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
8543 rn = (insn >> 16) & 0xf;
8544 addr = load_reg(s, rn);
8545 i = (insn >> 23) & 3;
8547 case 0: offset = -4; break; /* DA */
8548 case 1: offset = 0; break; /* IA */
8549 case 2: offset = -8; break; /* DB */
8550 case 3: offset = 4; break; /* IB */
8554 tcg_gen_addi_i32(addr, addr, offset);
8555 /* Load PC into tmp and CPSR into tmp2. */
8556 tmp = tcg_temp_new_i32();
8557 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8558 tcg_gen_addi_i32(addr, addr, 4);
8559 tmp2 = tcg_temp_new_i32();
8560 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8561 if (insn & (1 << 21)) {
8562 /* Base writeback. */
8564 case 0: offset = -8; break;
8565 case 1: offset = 4; break;
8566 case 2: offset = -4; break;
8567 case 3: offset = 0; break;
8571 tcg_gen_addi_i32(addr, addr, offset);
8572 store_reg(s, rn, addr);
8574 tcg_temp_free_i32(addr);
8576 gen_rfe(s, tmp, tmp2);
8578 } else if ((insn & 0x0e000000) == 0x0a000000) {
8579 /* branch link and change to thumb (blx <offset>) */
8582 val = (uint32_t)s->pc;
8583 tmp = tcg_temp_new_i32();
8584 tcg_gen_movi_i32(tmp, val);
8585 store_reg(s, 14, tmp);
8586 /* Sign-extend the 24-bit offset */
8587 offset = (((int32_t)insn) << 8) >> 8;
8588 /* offset * 4 + bit24 * 2 + (thumb bit) */
8589 val += (offset << 2) | ((insn >> 23) & 2) | 1;
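/* BLX (immediate): bit 24 (H) contributes a further halfword of offset,
 * and the low bit set to 1 forces the target to be executed as Thumb code.
 */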
8590 /* pipeline offset */
8592 /* protected by ARCH(5); above, near the start of uncond block */
8595 } else if ((insn & 0x0e000f00) == 0x0c000100) {
8596 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8597 /* iWMMXt register transfer. */
8598 if (extract32(s->c15_cpar, 1, 1)) {
8599 if (!disas_iwmmxt_insn(s, insn)) {
8604 } else if ((insn & 0x0e000a00) == 0x0c000800
8605 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8606 if (disas_neon_insn_3same_ext(s, insn)) {
8610 } else if ((insn & 0x0f000a00) == 0x0e000800
8611 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8612 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8616 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8617 /* Coprocessor double register transfer. */
8619 } else if ((insn & 0x0f000010) == 0x0e000010) {
8620 /* Additional coprocessor register transfer. */
8621 } else if ((insn & 0x0ff10020) == 0x01000000) {
8624 /* cps (privileged) */
8628 if (insn & (1 << 19)) {
8629 if (insn & (1 << 8))
8631 if (insn & (1 << 7))
8633 if (insn & (1 << 6))
8635 if (insn & (1 << 18))
8638 if (insn & (1 << 17)) {
8640 val |= (insn & 0x1f);
8643 gen_set_psr_im(s, mask, 0, val);
8650 /* if not always execute, we generate a conditional jump to
8652 s->condlabel = gen_new_label();
8653 arm_gen_test_cc(cond ^ 1, s->condlabel);
8656 if ((insn & 0x0f900000) == 0x03000000) {
8657 if ((insn & (1 << 21)) == 0) {
8659 rd = (insn >> 12) & 0xf;
8660 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8661 if ((insn & (1 << 22)) == 0) {
8663 tmp = tcg_temp_new_i32();
8664 tcg_gen_movi_i32(tmp, val);
8667 tmp = load_reg(s, rd);
8668 tcg_gen_ext16u_i32(tmp, tmp);
8669 tcg_gen_ori_i32(tmp, tmp, val << 16);
8671 store_reg(s, rd, tmp);
8673 if (((insn >> 12) & 0xf) != 0xf)
8675 if (((insn >> 16) & 0xf) == 0) {
8676 gen_nop_hint(s, insn & 0xff);
8678 /* CPSR = immediate */
8680 shift = ((insn >> 8) & 0xf) * 2;
8682 val = (val >> shift) | (val << (32 - shift));
8683 i = ((insn & (1 << 22)) != 0);
8684 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8690 } else if ((insn & 0x0f900000) == 0x01000000
8691 && (insn & 0x00000090) != 0x00000090) {
8692 /* miscellaneous instructions */
8693 op1 = (insn >> 21) & 3;
8694 sh = (insn >> 4) & 0xf;
8697 case 0x0: /* MSR, MRS */
8698 if (insn & (1 << 9)) {
8699 /* MSR (banked) and MRS (banked) */
8700 int sysm = extract32(insn, 16, 4) |
8701 (extract32(insn, 8, 1) << 4);
8702 int r = extract32(insn, 22, 1);
8706 gen_msr_banked(s, r, sysm, rm);
8709 int rd = extract32(insn, 12, 4);
8711 gen_mrs_banked(s, r, sysm, rd);
8716 /* MSR, MRS (for PSRs) */
8719 tmp = load_reg(s, rm);
8720 i = ((op1 & 2) != 0);
8721 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8725 rd = (insn >> 12) & 0xf;
8729 tmp = load_cpu_field(spsr);
8731 tmp = tcg_temp_new_i32();
8732 gen_helper_cpsr_read(tmp, cpu_env);
8734 store_reg(s, rd, tmp);
8739 /* branch/exchange thumb (bx). */
8741 tmp = load_reg(s, rm);
8743 } else if (op1 == 3) {
8746 rd = (insn >> 12) & 0xf;
8747 tmp = load_reg(s, rm);
8748 tcg_gen_clzi_i32(tmp, tmp, 32);
8749 store_reg(s, rd, tmp);
8757 /* Trivial implementation equivalent to bx. */
8758 tmp = load_reg(s, rm);
8769 /* branch link/exchange thumb (blx) */
8770 tmp = load_reg(s, rm);
8771 tmp2 = tcg_temp_new_i32();
8772 tcg_gen_movi_i32(tmp2, s->pc);
8773 store_reg(s, 14, tmp2);
8779 uint32_t c = extract32(insn, 8, 4);
8781 /* Check this CPU supports ARMv8 CRC instructions.
8782 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8783 * Bits 8, 10 and 11 should be zero.
8785 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
8790 rn = extract32(insn, 16, 4);
8791 rd = extract32(insn, 12, 4);
8793 tmp = load_reg(s, rn);
8794 tmp2 = load_reg(s, rm);
8796 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8797 } else if (op1 == 1) {
8798 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8800 tmp3 = tcg_const_i32(1 << op1);
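/* 1 << op1 is the operand size in bytes (1, 2 or 4); the C bit in the
 * encoding selects the CRC32C (Castagnoli) polynomial helper rather than
 * plain CRC32.
 */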
8802 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8804 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8806 tcg_temp_free_i32(tmp2);
8807 tcg_temp_free_i32(tmp3);
8808 store_reg(s, rd, tmp);
8811 case 0x5: /* saturating add/subtract */
8813 rd = (insn >> 12) & 0xf;
8814 rn = (insn >> 16) & 0xf;
8815 tmp = load_reg(s, rm);
8816 tmp2 = load_reg(s, rn);
8818 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8820 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8822 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8823 tcg_temp_free_i32(tmp2);
8824 store_reg(s, rd, tmp);
8828 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8837 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
8840 /* Hypervisor call (v7) */
8848 /* Secure monitor call (v6+) */
8856 g_assert_not_reached();
8860 case 0x8: /* signed multiply */
8865 rs = (insn >> 8) & 0xf;
8866 rn = (insn >> 12) & 0xf;
8867 rd = (insn >> 16) & 0xf;
8869 /* (32 * 16) >> 16 */
8870 tmp = load_reg(s, rm);
8871 tmp2 = load_reg(s, rs);
8873 tcg_gen_sari_i32(tmp2, tmp2, 16);
8876 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8877 tcg_gen_shri_i64(tmp64, tmp64, 16);
8878 tmp = tcg_temp_new_i32();
8879 tcg_gen_extrl_i64_i32(tmp, tmp64);
8880 tcg_temp_free_i64(tmp64);
8881 if ((sh & 2) == 0) {
8882 tmp2 = load_reg(s, rn);
8883 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8884 tcg_temp_free_i32(tmp2);
8886 store_reg(s, rd, tmp);
8889 tmp = load_reg(s, rm);
8890 tmp2 = load_reg(s, rs);
8891 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8892 tcg_temp_free_i32(tmp2);
8894 tmp64 = tcg_temp_new_i64();
8895 tcg_gen_ext_i32_i64(tmp64, tmp);
8896 tcg_temp_free_i32(tmp);
8897 gen_addq(s, tmp64, rn, rd);
8898 gen_storeq_reg(s, rn, rd, tmp64);
8899 tcg_temp_free_i64(tmp64);
8902 tmp2 = load_reg(s, rn);
8903 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8904 tcg_temp_free_i32(tmp2);
8906 store_reg(s, rd, tmp);
8913 } else if (((insn & 0x0e000000) == 0 &&
8914 (insn & 0x00000090) != 0x90) ||
8915 ((insn & 0x0e000000) == (1 << 25))) {
8916 int set_cc, logic_cc, shiftop;
8918 op1 = (insn >> 21) & 0xf;
8919 set_cc = (insn >> 20) & 1;
8920 logic_cc = table_logic_cc[op1] & set_cc;
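/* table_logic_cc[] marks the opcodes (AND, EOR, TST, TEQ, ORR, MOV, BIC,
 * MVN) whose S variant takes the carry flag from the shifter output, so
 * logic_cc is only set when both the table entry and the S bit are set.
 */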
8922 /* data processing instruction */
8923 if (insn & (1 << 25)) {
8924 /* immediate operand */
8926 shift = ((insn >> 8) & 0xf) * 2;
8928 val = (val >> shift) | (val << (32 - shift));
8930 tmp2 = tcg_temp_new_i32();
8931 tcg_gen_movi_i32(tmp2, val);
8932 if (logic_cc && shift) {
8933 gen_set_CF_bit31(tmp2);
8938 tmp2 = load_reg(s, rm);
8939 shiftop = (insn >> 5) & 3;
8940 if (!(insn & (1 << 4))) {
8941 shift = (insn >> 7) & 0x1f;
8942 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8944 rs = (insn >> 8) & 0xf;
8945 tmp = load_reg(s, rs);
8946 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8949 if (op1 != 0x0f && op1 != 0x0d) {
8950 rn = (insn >> 16) & 0xf;
8951 tmp = load_reg(s, rn);
8955 rd = (insn >> 12) & 0xf;
8958 tcg_gen_and_i32(tmp, tmp, tmp2);
8962 store_reg_bx(s, rd, tmp);
8965 tcg_gen_xor_i32(tmp, tmp, tmp2);
8969 store_reg_bx(s, rd, tmp);
8972 if (set_cc && rd == 15) {
8973 /* SUBS r15, ... is used for exception return. */
8977 gen_sub_CC(tmp, tmp, tmp2);
8978 gen_exception_return(s, tmp);
8981 gen_sub_CC(tmp, tmp, tmp2);
8983 tcg_gen_sub_i32(tmp, tmp, tmp2);
8985 store_reg_bx(s, rd, tmp);
8990 gen_sub_CC(tmp, tmp2, tmp);
8992 tcg_gen_sub_i32(tmp, tmp2, tmp);
8994 store_reg_bx(s, rd, tmp);
8998 gen_add_CC(tmp, tmp, tmp2);
9000 tcg_gen_add_i32(tmp, tmp, tmp2);
9002 store_reg_bx(s, rd, tmp);
9006 gen_adc_CC(tmp, tmp, tmp2);
9008 gen_add_carry(tmp, tmp, tmp2);
9010 store_reg_bx(s, rd, tmp);
9014 gen_sbc_CC(tmp, tmp, tmp2);
9016 gen_sub_carry(tmp, tmp, tmp2);
9018 store_reg_bx(s, rd, tmp);
9022 gen_sbc_CC(tmp, tmp2, tmp);
9024 gen_sub_carry(tmp, tmp2, tmp);
9026 store_reg_bx(s, rd, tmp);
9030 tcg_gen_and_i32(tmp, tmp, tmp2);
9033 tcg_temp_free_i32(tmp);
9037 tcg_gen_xor_i32(tmp, tmp, tmp2);
9040 tcg_temp_free_i32(tmp);
9044 gen_sub_CC(tmp, tmp, tmp2);
9046 tcg_temp_free_i32(tmp);
9050 gen_add_CC(tmp, tmp, tmp2);
9052 tcg_temp_free_i32(tmp);
9055 tcg_gen_or_i32(tmp, tmp, tmp2);
9059 store_reg_bx(s, rd, tmp);
9062 if (logic_cc && rd == 15) {
9063 /* MOVS r15, ... is used for exception return. */
9067 gen_exception_return(s, tmp2);
9072 store_reg_bx(s, rd, tmp2);
9076 tcg_gen_andc_i32(tmp, tmp, tmp2);
9080 store_reg_bx(s, rd, tmp);
9084 tcg_gen_not_i32(tmp2, tmp2);
9088 store_reg_bx(s, rd, tmp2);
9091 if (op1 != 0x0f && op1 != 0x0d) {
9092 tcg_temp_free_i32(tmp2);
9095 /* other instructions */
9096 op1 = (insn >> 24) & 0xf;
9100 /* multiplies, extra load/stores */
9101 sh = (insn >> 5) & 3;
9104 rd = (insn >> 16) & 0xf;
9105 rn = (insn >> 12) & 0xf;
9106 rs = (insn >> 8) & 0xf;
9108 op1 = (insn >> 20) & 0xf;
9110 case 0: case 1: case 2: case 3: case 6:
9112 tmp = load_reg(s, rs);
9113 tmp2 = load_reg(s, rm);
9114 tcg_gen_mul_i32(tmp, tmp, tmp2);
9115 tcg_temp_free_i32(tmp2);
9116 if (insn & (1 << 22)) {
9117 /* Subtract (mls) */
9119 tmp2 = load_reg(s, rn);
9120 tcg_gen_sub_i32(tmp, tmp2, tmp);
9121 tcg_temp_free_i32(tmp2);
9122 } else if (insn & (1 << 21)) {
9124 tmp2 = load_reg(s, rn);
9125 tcg_gen_add_i32(tmp, tmp, tmp2);
9126 tcg_temp_free_i32(tmp2);
9128 if (insn & (1 << 20))
9130 store_reg(s, rd, tmp);
9133 /* 64 bit mul double accumulate (UMAAL) */
9135 tmp = load_reg(s, rs);
9136 tmp2 = load_reg(s, rm);
9137 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9138 gen_addq_lo(s, tmp64, rn);
9139 gen_addq_lo(s, tmp64, rd);
9140 gen_storeq_reg(s, rn, rd, tmp64);
9141 tcg_temp_free_i64(tmp64);
9143 case 8: case 9: case 10: case 11:
9144 case 12: case 13: case 14: case 15:
9145 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
9146 tmp = load_reg(s, rs);
9147 tmp2 = load_reg(s, rm);
9148 if (insn & (1 << 22)) {
9149 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
9151 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
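/* muls2/mulu2 give the 64-bit product directly as a lo:hi pair of 32-bit
 * values, so no i64 temporary is needed; the accumulate forms below just
 * add RdLo:RdHi with a single add2.
 */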
9153 if (insn & (1 << 21)) { /* mult accumulate */
9154 TCGv_i32 al = load_reg(s, rn);
9155 TCGv_i32 ah = load_reg(s, rd);
9156 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
9157 tcg_temp_free_i32(al);
9158 tcg_temp_free_i32(ah);
9160 if (insn & (1 << 20)) {
9161 gen_logicq_cc(tmp, tmp2);
9163 store_reg(s, rn, tmp);
9164 store_reg(s, rd, tmp2);
9170 rn = (insn >> 16) & 0xf;
9171 rd = (insn >> 12) & 0xf;
9172 if (insn & (1 << 23)) {
9173 /* load/store exclusive */
9174 int op2 = (insn >> 8) & 3;
9175 op1 = (insn >> 21) & 0x3;
9178 case 0: /* lda/stl */
9184 case 1: /* reserved */
9186 case 2: /* ldaex/stlex */
9189 case 3: /* ldrex/strex */
9198 addr = tcg_temp_local_new_i32();
9199 load_reg_var(s, addr, rn);
9201 /* Since the emulation does not have barriers,
9202 the acquire/release semantics need no special
9205 if (insn & (1 << 20)) {
9206 tmp = tcg_temp_new_i32();
9209 gen_aa32_ld32u_iss(s, tmp, addr,
9214 gen_aa32_ld8u_iss(s, tmp, addr,
9219 gen_aa32_ld16u_iss(s, tmp, addr,
9226 store_reg(s, rd, tmp);
9229 tmp = load_reg(s, rm);
9232 gen_aa32_st32_iss(s, tmp, addr,
9237 gen_aa32_st8_iss(s, tmp, addr,
9242 gen_aa32_st16_iss(s, tmp, addr,
9249 tcg_temp_free_i32(tmp);
9251 } else if (insn & (1 << 20)) {
9254 gen_load_exclusive(s, rd, 15, addr, 2);
9256 case 1: /* ldrexd */
9257 gen_load_exclusive(s, rd, rd + 1, addr, 3);
9259 case 2: /* ldrexb */
9260 gen_load_exclusive(s, rd, 15, addr, 0);
9262 case 3: /* ldrexh */
9263 gen_load_exclusive(s, rd, 15, addr, 1);
9272 gen_store_exclusive(s, rd, rm, 15, addr, 2);
9274 case 1: /* strexd */
9275 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
9277 case 2: /* strexb */
9278 gen_store_exclusive(s, rd, rm, 15, addr, 0);
9280 case 3: /* strexh */
9281 gen_store_exclusive(s, rd, rm, 15, addr, 1);
9287 tcg_temp_free_i32(addr);
9288 } else if ((insn & 0x00300f00) == 0) {
9289 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9294 TCGMemOp opc = s->be_data;
9298 if (insn & (1 << 22)) {
9301 opc |= MO_UL | MO_ALIGN;
9304 addr = load_reg(s, rn);
9305 taddr = gen_aa32_addr(s, addr, opc);
9306 tcg_temp_free_i32(addr);
9308 tmp = load_reg(s, rm);
9309 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9310 get_mem_index(s), opc);
9311 tcg_temp_free(taddr);
9312 store_reg(s, rd, tmp);
9319 bool load = insn & (1 << 20);
9320 bool wbit = insn & (1 << 21);
9321 bool pbit = insn & (1 << 24);
9322 bool doubleword = false;
9325 /* Misc load/store */
9326 rn = (insn >> 16) & 0xf;
9327 rd = (insn >> 12) & 0xf;
9329 /* ISS not valid if writeback */
9330 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9332 if (!load && (sh & 2)) {
9336 /* UNPREDICTABLE; we choose to UNDEF */
9339 load = (sh & 1) == 0;
9343 addr = load_reg(s, rn);
9345 gen_add_datah_offset(s, insn, 0, addr);
9352 tmp = load_reg(s, rd);
9353 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9354 tcg_temp_free_i32(tmp);
9355 tcg_gen_addi_i32(addr, addr, 4);
9356 tmp = load_reg(s, rd + 1);
9357 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9358 tcg_temp_free_i32(tmp);
9361 tmp = tcg_temp_new_i32();
9362 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9363 store_reg(s, rd, tmp);
9364 tcg_gen_addi_i32(addr, addr, 4);
9365 tmp = tcg_temp_new_i32();
9366 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9369 address_offset = -4;
9372 tmp = tcg_temp_new_i32();
9375 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9379 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9384 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9390 tmp = load_reg(s, rd);
9391 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
9392 tcg_temp_free_i32(tmp);
9394 /* Perform base writeback before the loaded value to
9395 ensure correct behavior with overlapping index registers.
9396 ldrd with base writeback is undefined if the
9397 destination and index registers overlap. */
9399 gen_add_datah_offset(s, insn, address_offset, addr);
9400 store_reg(s, rn, addr);
9403 tcg_gen_addi_i32(addr, addr, address_offset);
9404 store_reg(s, rn, addr);
9406 tcg_temp_free_i32(addr);
9409 /* Complete the load. */
9410 store_reg(s, rd, tmp);
9419 if (insn & (1 << 4)) {
9421 /* ARMv6 Media instructions. */
9423 rn = (insn >> 16) & 0xf;
9424 rd = (insn >> 12) & 0xf;
9425 rs = (insn >> 8) & 0xf;
9426 switch ((insn >> 23) & 3) {
9427 case 0: /* Parallel add/subtract. */
9428 op1 = (insn >> 20) & 7;
9429 tmp = load_reg(s, rn);
9430 tmp2 = load_reg(s, rm);
9431 sh = (insn >> 5) & 7;
9432 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9434 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
9435 tcg_temp_free_i32(tmp2);
9436 store_reg(s, rd, tmp);
9439 if ((insn & 0x00700020) == 0) {
9440 /* Halfword pack. */
9441 tmp = load_reg(s, rn);
9442 tmp2 = load_reg(s, rm);
9443 shift = (insn >> 7) & 0x1f;
9444 if (insn & (1 << 6)) {
9448 tcg_gen_sari_i32(tmp2, tmp2, shift);
9449 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9450 tcg_gen_ext16u_i32(tmp2, tmp2);
9454 tcg_gen_shli_i32(tmp2, tmp2, shift);
9455 tcg_gen_ext16u_i32(tmp, tmp);
9456 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9458 tcg_gen_or_i32(tmp, tmp, tmp2);
9459 tcg_temp_free_i32(tmp2);
9460 store_reg(s, rd, tmp);
9461 } else if ((insn & 0x00200020) == 0x00200000) {
9463 tmp = load_reg(s, rm);
9464 shift = (insn >> 7) & 0x1f;
9465 if (insn & (1 << 6)) {
9468 tcg_gen_sari_i32(tmp, tmp, shift);
9470 tcg_gen_shli_i32(tmp, tmp, shift);
9472 sh = (insn >> 16) & 0x1f;
9473 tmp2 = tcg_const_i32(sh);
9474 if (insn & (1 << 22))
9475 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
9477 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
9478 tcg_temp_free_i32(tmp2);
9479 store_reg(s, rd, tmp);
9480 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9482 tmp = load_reg(s, rm);
9483 sh = (insn >> 16) & 0x1f;
9484 tmp2 = tcg_const_i32(sh);
9485 if (insn & (1 << 22))
9486 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9488 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9489 tcg_temp_free_i32(tmp2);
9490 store_reg(s, rd, tmp);
9491 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9493 tmp = load_reg(s, rn);
9494 tmp2 = load_reg(s, rm);
9495 tmp3 = tcg_temp_new_i32();
9496 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9497 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9498 tcg_temp_free_i32(tmp3);
9499 tcg_temp_free_i32(tmp2);
9500 store_reg(s, rd, tmp);
9501 } else if ((insn & 0x000003e0) == 0x00000060) {
9502 tmp = load_reg(s, rm);
9503 shift = (insn >> 10) & 3;
9504 /* ??? In many cases it's not necessary to do a
9505 rotate, a shift is sufficient. */
9507 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9508 op1 = (insn >> 20) & 7;
9510 case 0: gen_sxtb16(tmp); break;
9511 case 2: gen_sxtb(tmp); break;
9512 case 3: gen_sxth(tmp); break;
9513 case 4: gen_uxtb16(tmp); break;
9514 case 6: gen_uxtb(tmp); break;
9515 case 7: gen_uxth(tmp); break;
9516 default: goto illegal_op;
9519 tmp2 = load_reg(s, rn);
9520 if ((op1 & 3) == 0) {
9521 gen_add16(tmp, tmp2);
9523 tcg_gen_add_i32(tmp, tmp, tmp2);
9524 tcg_temp_free_i32(tmp2);
9527 store_reg(s, rd, tmp);
9528 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9530 tmp = load_reg(s, rm);
9531 if (insn & (1 << 22)) {
9532 if (insn & (1 << 7)) {
9536 gen_helper_rbit(tmp, tmp);
9539 if (insn & (1 << 7))
9542 tcg_gen_bswap32_i32(tmp, tmp);
9544 store_reg(s, rd, tmp);
9549 case 2: /* Multiplies (Type 3). */
9550 switch ((insn >> 20) & 0x7) {
9552 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9553 /* op2 not 00x or 11x : UNDEF */
9556 /* Signed multiply most significant [accumulate].
9557 (SMMUL, SMMLA, SMMLS) */
9558 tmp = load_reg(s, rm);
9559 tmp2 = load_reg(s, rs);
9560 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9563 tmp = load_reg(s, rd);
9564 if (insn & (1 << 6)) {
9565 tmp64 = gen_subq_msw(tmp64, tmp);
9567 tmp64 = gen_addq_msw(tmp64, tmp);
9570 if (insn & (1 << 5)) {
9571 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9573 tcg_gen_shri_i64(tmp64, tmp64, 32);
9574 tmp = tcg_temp_new_i32();
9575 tcg_gen_extrl_i64_i32(tmp, tmp64);
9576 tcg_temp_free_i64(tmp64);
9577 store_reg(s, rn, tmp);
9581 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9582 if (insn & (1 << 7)) {
9585 tmp = load_reg(s, rm);
9586 tmp2 = load_reg(s, rs);
9587 if (insn & (1 << 5))
9588 gen_swap_half(tmp2);
9589 gen_smul_dual(tmp, tmp2);
9590 if (insn & (1 << 22)) {
9591 /* smlald, smlsld */
9594 tmp64 = tcg_temp_new_i64();
9595 tmp64_2 = tcg_temp_new_i64();
9596 tcg_gen_ext_i32_i64(tmp64, tmp);
9597 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
9598 tcg_temp_free_i32(tmp);
9599 tcg_temp_free_i32(tmp2);
9600 if (insn & (1 << 6)) {
9601 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9603 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9605 tcg_temp_free_i64(tmp64_2);
9606 gen_addq(s, tmp64, rd, rn);
9607 gen_storeq_reg(s, rd, rn, tmp64);
9608 tcg_temp_free_i64(tmp64);
9610 /* smuad, smusd, smlad, smlsd */
9611 if (insn & (1 << 6)) {
9612 /* This subtraction cannot overflow. */
9613 tcg_gen_sub_i32(tmp, tmp, tmp2);
9615 /* This addition cannot overflow 32 bits;
9616 * however it may overflow considered as a
9617 * signed operation, in which case we must set
9620 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9622 tcg_temp_free_i32(tmp2);
9625 tmp2 = load_reg(s, rd);
9626 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9627 tcg_temp_free_i32(tmp2);
9629 store_reg(s, rn, tmp);
9635 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
9638 if (((insn >> 5) & 7) || (rd != 15)) {
9641 tmp = load_reg(s, rm);
9642 tmp2 = load_reg(s, rs);
9643 if (insn & (1 << 21)) {
9644 gen_helper_udiv(tmp, tmp, tmp2);
9646 gen_helper_sdiv(tmp, tmp, tmp2);
9648 tcg_temp_free_i32(tmp2);
9649 store_reg(s, rn, tmp);
9656 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9658 case 0: /* Unsigned sum of absolute differences. */
9660 tmp = load_reg(s, rm);
9661 tmp2 = load_reg(s, rs);
9662 gen_helper_usad8(tmp, tmp, tmp2);
9663 tcg_temp_free_i32(tmp2);
9665 tmp2 = load_reg(s, rd);
9666 tcg_gen_add_i32(tmp, tmp, tmp2);
9667 tcg_temp_free_i32(tmp2);
9669 store_reg(s, rn, tmp);
9671 case 0x20: case 0x24: case 0x28: case 0x2c:
9672 /* Bitfield insert/clear. */
9674 shift = (insn >> 7) & 0x1f;
9675 i = (insn >> 16) & 0x1f;
9677 /* UNPREDICTABLE; we choose to UNDEF */
9682 tmp = tcg_temp_new_i32();
9683 tcg_gen_movi_i32(tmp, 0);
9685 tmp = load_reg(s, rm);
9688 tmp2 = load_reg(s, rd);
9689 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9690 tcg_temp_free_i32(tmp2);
9692 store_reg(s, rd, tmp);
9694 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9695 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9697 tmp = load_reg(s, rm);
9698 shift = (insn >> 7) & 0x1f;
9699 i = ((insn >> 16) & 0x1f) + 1;
9704 tcg_gen_extract_i32(tmp, tmp, shift, i);
9706 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9709 store_reg(s, rd, tmp);
9719 /* Check for undefined extension instructions
9720 * per the ARM Bible IE:
9721 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9723 sh = (0xf << 20) | (0xf << 4);
9724 if (op1 == 0x7 && ((insn & sh) == sh))
9728 /* load/store byte/word */
9729 rn = (insn >> 16) & 0xf;
9730 rd = (insn >> 12) & 0xf;
9731 tmp2 = load_reg(s, rn);
9732 if ((insn & 0x01200000) == 0x00200000) {
9734 i = get_a32_user_mem_index(s);
9736 i = get_mem_index(s);
9738 if (insn & (1 << 24))
9739 gen_add_data_offset(s, insn, tmp2);
9740 if (insn & (1 << 20)) {
9742 tmp = tcg_temp_new_i32();
9743 if (insn & (1 << 22)) {
9744 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9746 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9750 tmp = load_reg(s, rd);
9751 if (insn & (1 << 22)) {
9752 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
9754 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
9756 tcg_temp_free_i32(tmp);
9758 if (!(insn & (1 << 24))) {
9759 gen_add_data_offset(s, insn, tmp2);
9760 store_reg(s, rn, tmp2);
9761 } else if (insn & (1 << 21)) {
9762 store_reg(s, rn, tmp2);
9764 tcg_temp_free_i32(tmp2);
9766 if (insn & (1 << 20)) {
9767 /* Complete the load. */
9768 store_reg_from_load(s, rd, tmp);
9774 int j, n, loaded_base;
9775 bool exc_return = false;
9776 bool is_load = extract32(insn, 20, 1);
9778 TCGv_i32 loaded_var;
9779 /* load/store multiple words */
9780 /* XXX: store correct base if write back */
9781 if (insn & (1 << 22)) {
9782 /* LDM (user), LDM (exception return) and STM (user) */
9784 goto illegal_op; /* only usable in supervisor mode */
9786 if (is_load && extract32(insn, 15, 1)) {
9792 rn = (insn >> 16) & 0xf;
9793 addr = load_reg(s, rn);
9795 /* compute total size */
9800 if (insn & (1 << i))
9803 /* XXX: test invalid n == 0 case ? */
9804 if (insn & (1 << 23)) {
9805 if (insn & (1 << 24)) {
9807 tcg_gen_addi_i32(addr, addr, 4);
9809 /* post increment */
9812 if (insn & (1 << 24)) {
9814 tcg_gen_addi_i32(addr, addr, -(n * 4));
9816 /* post decrement */
9818 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
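/* Decrementing forms: DB starts n words below the base, DA one word
 * higher; the transfer loop below then walks upwards, so the
 * lowest-numbered register always lands at the lowest address, as the
 * architecture requires.
 */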
9823 if (insn & (1 << i)) {
9826 tmp = tcg_temp_new_i32();
9827 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9829 tmp2 = tcg_const_i32(i);
9830 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9831 tcg_temp_free_i32(tmp2);
9832 tcg_temp_free_i32(tmp);
9833 } else if (i == rn) {
9836 } else if (rn == 15 && exc_return) {
9837 store_pc_exc_ret(s, tmp);
9839 store_reg_from_load(s, i, tmp);
9844 /* special case: r15 = PC + 8 */
9845 val = (long)s->pc + 4;
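/* s->pc is already 4 past this insn, so +4 gives the architectural value
 * of PC + 8.
 */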
9846 tmp = tcg_temp_new_i32();
9847 tcg_gen_movi_i32(tmp, val);
9849 tmp = tcg_temp_new_i32();
9850 tmp2 = tcg_const_i32(i);
9851 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9852 tcg_temp_free_i32(tmp2);
9854 tmp = load_reg(s, i);
9856 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9857 tcg_temp_free_i32(tmp);
9860 /* no need to add after the last transfer */
9862 tcg_gen_addi_i32(addr, addr, 4);
9865 if (insn & (1 << 21)) {
9867 if (insn & (1 << 23)) {
9868 if (insn & (1 << 24)) {
9871 /* post increment */
9872 tcg_gen_addi_i32(addr, addr, 4);
9875 if (insn & (1 << 24)) {
9878 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9880 /* post decrement */
9881 tcg_gen_addi_i32(addr, addr, -(n * 4));
9884 store_reg(s, rn, addr);
9886 tcg_temp_free_i32(addr);
9889 store_reg(s, rn, loaded_var);
9892 /* Restore CPSR from SPSR. */
9893 tmp = load_cpu_field(spsr);
9894 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9897 gen_helper_cpsr_write_eret(cpu_env, tmp);
9898 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9901 tcg_temp_free_i32(tmp);
9902 /* Must exit loop to check un-masked IRQs */
9903 s->base.is_jmp = DISAS_EXIT;
9912 /* branch (and link) */
9913 val = (int32_t)s->pc;
9914 if (insn & (1 << 24)) {
9915 tmp = tcg_temp_new_i32();
9916 tcg_gen_movi_i32(tmp, val);
9917 store_reg(s, 14, tmp);
9919 offset = sextract32(insn << 2, 0, 26);
9927 if (((insn >> 8) & 0xe) == 10) {
9929 if (disas_vfp_insn(s, insn)) {
9932 } else if (disas_coproc_insn(s, insn)) {
9939 gen_set_pc_im(s, s->pc);
9940 s->svc_imm = extract32(insn, 0, 24);
9941 s->base.is_jmp = DISAS_SWI;
9945 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9946 default_exception_el(s));
9952 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
9954 /* Return true if this is a 16 bit instruction. We must be precise
9955 * about this (matching the decode). We assume that s->pc still
9956 * points to the first 16 bits of the insn.
9958 if ((insn >> 11) < 0x1d) {
9959 /* Definitely a 16-bit instruction */
9963 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9964 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9965 * end up actually treating this as two 16-bit insns, though,
9966 * if it's half of a bl/blx pair that might span a page boundary.
9968 if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
9969 /* Thumb2 cores (including all M profile ones) always treat
9970 * 32-bit insns as 32-bit.
9975 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
9976 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9977 * is not on the next page; we merge this into a 32-bit
9982 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9983 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9984 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9985 * -- handle as single 16 bit insn
9990 /* Return true if this is a Thumb-2 logical op. */
9992 thumb2_logic_op(int op)
9997 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9998 then set condition code flags based on the result of the operation.
9999 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10000 to the high bit of T1.
10001 Returns zero if the opcode is valid. */
10004 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10005 TCGv_i32 t0, TCGv_i32 t1)
10012 tcg_gen_and_i32(t0, t0, t1);
10016 tcg_gen_andc_i32(t0, t0, t1);
10020 tcg_gen_or_i32(t0, t0, t1);
10024 tcg_gen_orc_i32(t0, t0, t1);
10028 tcg_gen_xor_i32(t0, t0, t1);
10033 gen_add_CC(t0, t0, t1);
10035 tcg_gen_add_i32(t0, t0, t1);
10039 gen_adc_CC(t0, t0, t1);
10045 gen_sbc_CC(t0, t0, t1);
10047 gen_sub_carry(t0, t0, t1);
10052 gen_sub_CC(t0, t0, t1);
10054 tcg_gen_sub_i32(t0, t0, t1);
10058 gen_sub_CC(t0, t1, t0);
10060 tcg_gen_sub_i32(t0, t1, t0);
10062 default: /* 5, 6, 7, 9, 12, 15. */
10068 gen_set_CF_bit31(t1);
10073 /* Translate a 32-bit thumb instruction. */
10074 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
10076 uint32_t imm, shift, offset;
10077 uint32_t rd, rn, rm, rs;
10088 /* The only 32 bit insn that's allowed for Thumb1 is the combined
10089 * BL/BLX prefix and suffix.
10091 if ((insn & 0xf800e800) != 0xf000e800) {
10095 rn = (insn >> 16) & 0xf;
10096 rs = (insn >> 12) & 0xf;
10097 rd = (insn >> 8) & 0xf;
10099 switch ((insn >> 25) & 0xf) {
10100 case 0: case 1: case 2: case 3:
10101 /* 16-bit instructions. Should never happen. */
10104 if (insn & (1 << 22)) {
10105 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10106 * - load/store doubleword, load/store exclusive, ldacq/strel,
10107 * table branch, TT.
10109 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10110 arm_dc_feature(s, ARM_FEATURE_V8)) {
10111 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
10113 * The bulk of the behaviour for this instruction is implemented
10114 * in v7m_handle_execute_nsc(), which deals with the insn when
10115 * it is executed by a CPU in non-secure state from memory
10116 * which is Secure & NonSecure-Callable.
10117 * Here we only need to handle the remaining cases:
10118 * * in NS memory (including the "security extension not
10119 * implemented" case) : NOP
10120 * * in S memory but CPU already secure (clear IT bits)
10121 * We know that the attribute for the memory this insn is
10122 * in must match the current CPU state, because otherwise
10123 * get_phys_addr_pmsav8 would have generated an exception.
10125 if (s->v8m_secure) {
10126 /* Like the IT insn, we don't need to generate any code */
10127 s->condexec_cond = 0;
10128 s->condexec_mask = 0;
10130 } else if (insn & 0x01200000) {
10131 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10132 * - load/store dual (post-indexed)
10133 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10134 * - load/store dual (literal and immediate)
10135 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10136 * - load/store dual (pre-indexed)
10139 if (insn & (1 << 21)) {
10140 /* UNPREDICTABLE */
10143 addr = tcg_temp_new_i32();
10144 tcg_gen_movi_i32(addr, s->pc & ~3);
10146 addr = load_reg(s, rn);
10148 offset = (insn & 0xff) * 4;
10149 if ((insn & (1 << 23)) == 0)
10151 if (insn & (1 << 24)) {
10152 tcg_gen_addi_i32(addr, addr, offset);
10155 if (insn & (1 << 20)) {
10157 tmp = tcg_temp_new_i32();
10158 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10159 store_reg(s, rs, tmp);
10160 tcg_gen_addi_i32(addr, addr, 4);
10161 tmp = tcg_temp_new_i32();
10162 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10163 store_reg(s, rd, tmp);
10166 tmp = load_reg(s, rs);
10167 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10168 tcg_temp_free_i32(tmp);
10169 tcg_gen_addi_i32(addr, addr, 4);
10170 tmp = load_reg(s, rd);
10171 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10172 tcg_temp_free_i32(tmp);
10174 if (insn & (1 << 21)) {
10175 /* Base writeback. */
10176 tcg_gen_addi_i32(addr, addr, offset - 4);
10177 store_reg(s, rn, addr);
10179 tcg_temp_free_i32(addr);
10181 } else if ((insn & (1 << 23)) == 0) {
10182 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10183 * - load/store exclusive word
10187 if (!(insn & (1 << 20)) &&
10188 arm_dc_feature(s, ARM_FEATURE_M) &&
10189 arm_dc_feature(s, ARM_FEATURE_V8)) {
10190 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10193 bool alt = insn & (1 << 7);
10194 TCGv_i32 addr, op, ttresp;
10196 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10197 /* we UNDEF for these UNPREDICTABLE cases */
10201 if (alt && !s->v8m_secure) {
10205 addr = load_reg(s, rn);
10206 op = tcg_const_i32(extract32(insn, 6, 2));
10207 ttresp = tcg_temp_new_i32();
10208 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10209 tcg_temp_free_i32(addr);
10210 tcg_temp_free_i32(op);
10211 store_reg(s, rd, ttresp);
10216 addr = tcg_temp_local_new_i32();
10217 load_reg_var(s, addr, rn);
10218 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
10219 if (insn & (1 << 20)) {
10220 gen_load_exclusive(s, rs, 15, addr, 2);
10222 gen_store_exclusive(s, rd, rs, 15, addr, 2);
10224 tcg_temp_free_i32(addr);
10225 } else if ((insn & (7 << 5)) == 0) {
10226 /* Table Branch. */
10228 addr = tcg_temp_new_i32();
10229 tcg_gen_movi_i32(addr, s->pc);
10231 addr = load_reg(s, rn);
10233 tmp = load_reg(s, rm);
10234 tcg_gen_add_i32(addr, addr, tmp);
10235 if (insn & (1 << 4)) {
10237 tcg_gen_add_i32(addr, addr, tmp);
10238 tcg_temp_free_i32(tmp);
10239 tmp = tcg_temp_new_i32();
10240 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
10242 tcg_temp_free_i32(tmp);
10243 tmp = tcg_temp_new_i32();
10244 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
10246 tcg_temp_free_i32(addr);
10247 tcg_gen_shli_i32(tmp, tmp, 1);
10248 tcg_gen_addi_i32(tmp, tmp, s->pc);
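/* TBB/TBH entries are forward branch offsets counted in halfwords, hence
 * the doubling above before adding the PC (s->pc already points past this
 * insn).
 */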
10249 store_reg(s, 15, tmp);
10251 int op2 = (insn >> 6) & 0x3;
10252 op = (insn >> 4) & 0x3;
10257 /* Load/store exclusive byte/halfword/doubleword */
10264 /* Load-acquire/store-release */
10270 /* Load-acquire/store-release exclusive */
10274 addr = tcg_temp_local_new_i32();
10275 load_reg_var(s, addr, rn);
10277 if (insn & (1 << 20)) {
10278 tmp = tcg_temp_new_i32();
10281 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10285 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10289 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10295 store_reg(s, rs, tmp);
10297 tmp = load_reg(s, rs);
10300 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10304 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10308 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10314 tcg_temp_free_i32(tmp);
10316 } else if (insn & (1 << 20)) {
10317 gen_load_exclusive(s, rs, rd, addr, op);
10319 gen_store_exclusive(s, rm, rs, rd, addr, op);
10321 tcg_temp_free_i32(addr);
10324 /* Load/store multiple, RFE, SRS. */
10325 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
10326 /* RFE, SRS: not available in user mode or on M profile */
10327 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10330 if (insn & (1 << 20)) {
10332 addr = load_reg(s, rn);
10333 if ((insn & (1 << 24)) == 0)
10334 tcg_gen_addi_i32(addr, addr, -8);
10335 /* Load PC into tmp and CPSR into tmp2. */
10336 tmp = tcg_temp_new_i32();
10337 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10338 tcg_gen_addi_i32(addr, addr, 4);
10339 tmp2 = tcg_temp_new_i32();
10340 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
10341 if (insn & (1 << 21)) {
10342 /* Base writeback. */
10343 if (insn & (1 << 24)) {
10344 tcg_gen_addi_i32(addr, addr, 4);
10346 tcg_gen_addi_i32(addr, addr, -4);
10348 store_reg(s, rn, addr);
10350 tcg_temp_free_i32(addr);
10352 gen_rfe(s, tmp, tmp2);
10355 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10359 int i, loaded_base = 0;
10360 TCGv_i32 loaded_var;
10361 /* Load/store multiple. */
10362 addr = load_reg(s, rn);
10364 for (i = 0; i < 16; i++) {
10365 if (insn & (1 << i))
10368 if (insn & (1 << 24)) {
10369 tcg_gen_addi_i32(addr, addr, -offset);
10373 for (i = 0; i < 16; i++) {
10374 if ((insn & (1 << i)) == 0)
10376 if (insn & (1 << 20)) {
10378 tmp = tcg_temp_new_i32();
10379 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10381 gen_bx_excret(s, tmp);
10382 } else if (i == rn) {
10386 store_reg(s, i, tmp);
10390 tmp = load_reg(s, i);
10391 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10392 tcg_temp_free_i32(tmp);
10394 tcg_gen_addi_i32(addr, addr, 4);
10397 store_reg(s, rn, loaded_var);
10399 if (insn & (1 << 21)) {
10400 /* Base register writeback. */
10401 if (insn & (1 << 24)) {
10402 tcg_gen_addi_i32(addr, addr, -offset);
10404 /* Fault if writeback register is in register list. */
10405 if (insn & (1 << rn))
10407 store_reg(s, rn, addr);
10409 tcg_temp_free_i32(addr);
10416 op = (insn >> 21) & 0xf;
10418 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10421 /* Halfword pack. */
10422 tmp = load_reg(s, rn);
10423 tmp2 = load_reg(s, rm);
10424 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
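/* Thumb-2 splits the 5-bit immediate shift amount across the imm3 field
 * (bits [14:12]) and the imm2 field (bits [7:6]).
 */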
10425 if (insn & (1 << 5)) {
10429 tcg_gen_sari_i32(tmp2, tmp2, shift);
10430 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10431 tcg_gen_ext16u_i32(tmp2, tmp2);
10435 tcg_gen_shli_i32(tmp2, tmp2, shift);
10436 tcg_gen_ext16u_i32(tmp, tmp);
10437 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10439 tcg_gen_or_i32(tmp, tmp, tmp2);
10440 tcg_temp_free_i32(tmp2);
10441 store_reg(s, rd, tmp);
10443 /* Data processing register constant shift. */
10445 tmp = tcg_temp_new_i32();
10446 tcg_gen_movi_i32(tmp, 0);
10448 tmp = load_reg(s, rn);
10450 tmp2 = load_reg(s, rm);
10452 shiftop = (insn >> 4) & 3;
10453 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10454 conds = (insn & (1 << 20)) != 0;
10455 logic_cc = (conds && thumb2_logic_op(op));
10456 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10457 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10459 tcg_temp_free_i32(tmp2);
10461 store_reg(s, rd, tmp);
10463 tcg_temp_free_i32(tmp);
10467 case 13: /* Misc data processing. */
10468 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10469 if (op < 4 && (insn & 0xf000) != 0xf000)
10472 case 0: /* Register controlled shift. */
10473 tmp = load_reg(s, rn);
10474 tmp2 = load_reg(s, rm);
10475 if ((insn & 0x70) != 0)
10477 op = (insn >> 21) & 3;
10478 logic_cc = (insn & (1 << 20)) != 0;
10479 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10482 store_reg(s, rd, tmp);
10484 case 1: /* Sign/zero extend. */
10485 op = (insn >> 20) & 7;
10487 case 0: /* SXTAH, SXTH */
10488 case 1: /* UXTAH, UXTH */
10489 case 4: /* SXTAB, SXTB */
10490 case 5: /* UXTAB, UXTB */
10492 case 2: /* SXTAB16, SXTB16 */
10493 case 3: /* UXTAB16, UXTB16 */
10494 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10502 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10506 tmp = load_reg(s, rm);
10507 shift = (insn >> 4) & 3;
10508 /* ??? In many cases it's not necessary to do a
10509 rotate, a shift is sufficient. */
10511 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
10512 op = (insn >> 20) & 7;
10514 case 0: gen_sxth(tmp); break;
10515 case 1: gen_uxth(tmp); break;
10516 case 2: gen_sxtb16(tmp); break;
10517 case 3: gen_uxtb16(tmp); break;
10518 case 4: gen_sxtb(tmp); break;
10519 case 5: gen_uxtb(tmp); break;
10521 g_assert_not_reached();
10524 tmp2 = load_reg(s, rn);
10525 if ((op >> 1) == 1) {
10526 gen_add16(tmp, tmp2);
10528 tcg_gen_add_i32(tmp, tmp, tmp2);
10529 tcg_temp_free_i32(tmp2);
10532 store_reg(s, rd, tmp);
10534 case 2: /* SIMD add/subtract. */
10535 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10538 op = (insn >> 20) & 7;
10539 shift = (insn >> 4) & 7;
10540 if ((op & 3) == 3 || (shift & 3) == 3)
10542 tmp = load_reg(s, rn);
10543 tmp2 = load_reg(s, rm);
10544 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
10545 tcg_temp_free_i32(tmp2);
10546 store_reg(s, rd, tmp);
10548 case 3: /* Other data processing. */
10549 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10551 /* Saturating add/subtract. */
10552 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10555 tmp = load_reg(s, rn);
10556 tmp2 = load_reg(s, rm);
10558 gen_helper_double_saturate(tmp, cpu_env, tmp);
10560 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
10562 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
10563 tcg_temp_free_i32(tmp2);
10566 case 0x0a: /* rbit */
10567 case 0x08: /* rev */
10568 case 0x09: /* rev16 */
10569 case 0x0b: /* revsh */
10570 case 0x18: /* clz */
10572 case 0x10: /* sel */
10573 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10577 case 0x20: /* crc32/crc32c */
10583 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10590 tmp = load_reg(s, rn);
10592 case 0x0a: /* rbit */
10593 gen_helper_rbit(tmp, tmp);
10595 case 0x08: /* rev */
10596 tcg_gen_bswap32_i32(tmp, tmp);
10598 case 0x09: /* rev16 */
10601 case 0x0b: /* revsh */
10604 case 0x10: /* sel */
10605 tmp2 = load_reg(s, rm);
10606 tmp3 = tcg_temp_new_i32();
10607 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10608 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10609 tcg_temp_free_i32(tmp3);
10610 tcg_temp_free_i32(tmp2);
10612 case 0x18: /* clz */
10613 tcg_gen_clzi_i32(tmp, tmp, 32);
10623 uint32_t sz = op & 0x3;
10624 uint32_t c = op & 0x8;
10626 tmp2 = load_reg(s, rm);
10628 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10629 } else if (sz == 1) {
10630 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10632 tmp3 = tcg_const_i32(1 << sz);
10634 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10636 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10638 tcg_temp_free_i32(tmp2);
10639 tcg_temp_free_i32(tmp3);
10643 g_assert_not_reached();
10646 store_reg(s, rd, tmp);
10648 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10649 switch ((insn >> 20) & 7) {
10650 case 0: /* 32 x 32 -> 32 */
10651 case 7: /* Unsigned sum of absolute differences. */
10653 case 1: /* 16 x 16 -> 32 */
10654 case 2: /* Dual multiply add. */
10655 case 3: /* 32 * 16 -> 32msb */
10656 case 4: /* Dual multiply subtract. */
10657 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10658 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10663 op = (insn >> 4) & 0xf;
10664 tmp = load_reg(s, rn);
10665 tmp2 = load_reg(s, rm);
10666 switch ((insn >> 20) & 7) {
10667 case 0: /* 32 x 32 -> 32 */
10668 tcg_gen_mul_i32(tmp, tmp, tmp2);
10669 tcg_temp_free_i32(tmp2);
10671 tmp2 = load_reg(s, rs);
10673 tcg_gen_sub_i32(tmp, tmp2, tmp);
10675 tcg_gen_add_i32(tmp, tmp, tmp2);
10676 tcg_temp_free_i32(tmp2);
10679 case 1: /* 16 x 16 -> 32 */
10680 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10681 tcg_temp_free_i32(tmp2);
10683 tmp2 = load_reg(s, rs);
10684 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10685 tcg_temp_free_i32(tmp2);
10688 case 2: /* Dual multiply add. */
10689 case 4: /* Dual multiply subtract. */
10691 gen_swap_half(tmp2);
10692 gen_smul_dual(tmp, tmp2);
10693 if (insn & (1 << 22)) {
10694 /* This subtraction cannot overflow. */
10695 tcg_gen_sub_i32(tmp, tmp, tmp2);
10697 /* This addition cannot overflow 32 bits;
10698 * however it may overflow considered as a signed
10699 * operation, in which case we must set the Q flag.
10701 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10703 tcg_temp_free_i32(tmp2);
10706 tmp2 = load_reg(s, rs);
10707 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10708 tcg_temp_free_i32(tmp2);
10711 case 3: /* 32 * 16 -> 32msb */
10713 tcg_gen_sari_i32(tmp2, tmp2, 16);
10716 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10717 tcg_gen_shri_i64(tmp64, tmp64, 16);
10718 tmp = tcg_temp_new_i32();
10719 tcg_gen_extrl_i64_i32(tmp, tmp64);
10720 tcg_temp_free_i64(tmp64);
10723 tmp2 = load_reg(s, rs);
10724 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10725 tcg_temp_free_i32(tmp2);
10728 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10729 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10731 tmp = load_reg(s, rs);
10732 if (insn & (1 << 20)) {
10733 tmp64 = gen_addq_msw(tmp64, tmp);
10735 tmp64 = gen_subq_msw(tmp64, tmp);
10738 if (insn & (1 << 4)) {
10739 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10741 tcg_gen_shri_i64(tmp64, tmp64, 32);
10742 tmp = tcg_temp_new_i32();
10743 tcg_gen_extrl_i64_i32(tmp, tmp64);
10744 tcg_temp_free_i64(tmp64);
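/* Note: the 0x80000000 addend above implements the rounding (R) variants
 * (SMMULR/SMMLAR/SMMLSR).  As an illustrative example, a 64-bit value of
 * 0x0000000180000000 truncates to 1 but rounds to 2, because adding
 * 0x80000000 first carries into the upper word that is then extracted.
 */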
10746 case 7: /* Unsigned sum of absolute differences. */
10747 gen_helper_usad8(tmp, tmp, tmp2);
10748 tcg_temp_free_i32(tmp2);
10750 tmp2 = load_reg(s, rs);
10751 tcg_gen_add_i32(tmp, tmp, tmp2);
10752 tcg_temp_free_i32(tmp2);
10756 store_reg(s, rd, tmp);
10758 case 6: case 7: /* 64-bit multiply, Divide. */
10759 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10760 tmp = load_reg(s, rn);
10761 tmp2 = load_reg(s, rm);
10762 if ((op & 0x50) == 0x10) {
10764 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
10768 gen_helper_udiv(tmp, tmp, tmp2);
10770 gen_helper_sdiv(tmp, tmp, tmp2);
10771 tcg_temp_free_i32(tmp2);
10772 store_reg(s, rd, tmp);
10773 } else if ((op & 0xe) == 0xc) {
10774 /* Dual multiply accumulate long. */
10775 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10776 tcg_temp_free_i32(tmp);
10777 tcg_temp_free_i32(tmp2);
10781 gen_swap_half(tmp2);
10782 gen_smul_dual(tmp, tmp2);
10784 tcg_gen_sub_i32(tmp, tmp, tmp2);
10786 tcg_gen_add_i32(tmp, tmp, tmp2);
10788 tcg_temp_free_i32(tmp2);
10790 tmp64 = tcg_temp_new_i64();
10791 tcg_gen_ext_i32_i64(tmp64, tmp);
10792 tcg_temp_free_i32(tmp);
10793 gen_addq(s, tmp64, rs, rd);
10794 gen_storeq_reg(s, rs, rd, tmp64);
10795 tcg_temp_free_i64(tmp64);
10798 /* Unsigned 64-bit multiply */
10799 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10803 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10804 tcg_temp_free_i32(tmp2);
10805 tcg_temp_free_i32(tmp);
10808 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10809 tcg_temp_free_i32(tmp2);
10810 tmp64 = tcg_temp_new_i64();
10811 tcg_gen_ext_i32_i64(tmp64, tmp);
10812 tcg_temp_free_i32(tmp);
10814 /* Signed 64-bit multiply */
10815 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10820 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10821 tcg_temp_free_i64(tmp64);
10824 gen_addq_lo(s, tmp64, rs);
10825 gen_addq_lo(s, tmp64, rd);
10826 } else if (op & 0x40) {
10827 /* 64-bit accumulate. */
10828 gen_addq(s, tmp64, rs, rd);
10830 gen_storeq_reg(s, rs, rd, tmp64);
10831 tcg_temp_free_i64(tmp64);
10836 case 6: case 7: case 14: case 15:
10838 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10839 /* We don't currently implement M profile FP support,
10840 * so this entire space should give a NOCP fault, with
10841 * the exception of the v8M VLLDM and VLSTM insns, which
10842 * must be NOPs in Secure state and UNDEF in Nonsecure state.
10844 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10845 (insn & 0xffa00f00) == 0xec200a00) {
10846 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10848 * - VLLDM, VLSTM: we choose to UNDEF if the RAZ bits are non-zero.
10850 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10853 /* Just NOP since FP support is not implemented */
10856 /* All other insns: NOCP */
10857 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10858 default_exception_el(s));
10861 if ((insn & 0xfe000a00) == 0xfc000800
10862 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10863 /* The Thumb2 and ARM encodings are identical. */
10864 if (disas_neon_insn_3same_ext(s, insn)) {
10867 } else if ((insn & 0xff000a00) == 0xfe000800
10868 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10869 /* The Thumb2 and ARM encodings are identical. */
10870 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10873 } else if (((insn >> 24) & 3) == 3) {
10874 /* Translate into the equivalent ARM encoding. */
10875 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10876 if (disas_neon_data_insn(s, insn)) {
10879 } else if (((insn >> 8) & 0xe) == 10) {
10880 if (disas_vfp_insn(s, insn)) {
10884 if (insn & (1 << 28))
10886 if (disas_coproc_insn(s, insn)) {
10891 case 8: case 9: case 10: case 11:
10892 if (insn & (1 << 15)) {
10893 /* Branches, misc control. */
10894 if (insn & 0x5000) {
10895 /* Unconditional branch. */
10896 /* signextend(hw1[10:0]) -> offset[:12]. */
10897 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10898 /* hw2[10:0] -> offset[11:1]. */
10899 offset |= (insn & 0x7ff) << 1;
10900 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10901 offset[24:22] already have the same value because of the
10902 sign extension above. */
10903 offset ^= ((~insn) & (1 << 13)) << 10;
10904 offset ^= ((~insn) & (1 << 11)) << 11;
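/* Worked example (illustrative encoding): hw1 = 0xf001, hw2 = 0xf800,
 * i.e. insn = 0xf001f800, is BL with S = 0, imm10 = 1, J1 = J2 = 1 and
 * imm11 = 0.  The sign-extension step contributes 0x1000, the imm11 term
 * adds nothing, and with J1 = J2 = 1 and S = 0 both XORs leave bits 23:22
 * clear, so offset ends up as +0x1000.  Adding s->pc below then makes the
 * target this insn's address + 4 + 0x1000, matching the architectural
 * BL/B.W definition.
 */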
10906 if (insn & (1 << 14)) {
10907 /* Branch and link. */
10908 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
10912 if (insn & (1 << 12)) {
10914 gen_jmp(s, offset);
10917 offset &= ~(uint32_t)2;
10918 /* thumb2 bx, no need to check */
10919 gen_bx_im(s, offset);
10921 } else if (((insn >> 23) & 7) == 7) {
10923 if (insn & (1 << 13))
10926 if (insn & (1 << 26)) {
10927 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10930 if (!(insn & (1 << 20))) {
10931 /* Hypervisor call (v7) */
10932 int imm16 = extract32(insn, 16, 4) << 12
10933 | extract32(insn, 0, 12);
10940 /* Secure monitor call (v6+) */
10948 op = (insn >> 20) & 7;
10950 case 0: /* msr cpsr. */
10951 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10952 tmp = load_reg(s, rn);
10953 /* the constant is the mask and SYSm fields */
10954 addr = tcg_const_i32(insn & 0xfff);
10955 gen_helper_v7m_msr(cpu_env, addr, tmp);
10956 tcg_temp_free_i32(addr);
10957 tcg_temp_free_i32(tmp);
10962 case 1: /* msr spsr. */
10963 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10967 if (extract32(insn, 5, 1)) {
10969 int sysm = extract32(insn, 8, 4) |
10970 (extract32(insn, 4, 1) << 4);
10973 gen_msr_banked(s, r, sysm, rm);
10977 /* MSR (for PSRs) */
10978 tmp = load_reg(s, rn);
10980 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10984 case 2: /* cps, nop-hint. */
10985 if (((insn >> 8) & 7) == 0) {
10986 gen_nop_hint(s, insn & 0xff);
10988 /* Implemented as NOP in user mode. */
10993 if (insn & (1 << 10)) {
10994 if (insn & (1 << 7))
10996 if (insn & (1 << 6))
10998 if (insn & (1 << 5))
11000 if (insn & (1 << 9))
11001 imm = CPSR_A | CPSR_I | CPSR_F;
11003 if (insn & (1 << 8)) {
11005 imm |= (insn & 0x1f);
11008 gen_set_psr_im(s, offset, 0, imm);
11011 case 3: /* Special control operations. */
11013 op = (insn >> 4) & 0xf;
11015 case 2: /* clrex */
11020 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
11023 /* We need to break the TB after this insn
11024 * to execute self-modifying code correctly
11025 * and also to take any pending interrupts
11028 gen_goto_tb(s, 0, s->pc & ~1);
11035 /* Trivial implementation equivalent to bx.
11036 * This instruction doesn't exist at all for M-profile.
11038 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11041 tmp = load_reg(s, rn);
11044 case 5: /* Exception return. */
11048 if (rn != 14 || rd != 15) {
11051 tmp = load_reg(s, rn);
11052 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11053 gen_exception_return(s, tmp);
11056 if (extract32(insn, 5, 1) &&
11057 !arm_dc_feature(s, ARM_FEATURE_M)) {
11059 int sysm = extract32(insn, 16, 4) |
11060 (extract32(insn, 4, 1) << 4);
11062 gen_mrs_banked(s, 0, sysm, rd);
11066 if (extract32(insn, 16, 4) != 0xf) {
11069 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11070 extract32(insn, 0, 8) != 0) {
11075 tmp = tcg_temp_new_i32();
11076 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11077 addr = tcg_const_i32(insn & 0xff);
11078 gen_helper_v7m_mrs(tmp, cpu_env, addr);
11079 tcg_temp_free_i32(addr);
11081 gen_helper_cpsr_read(tmp, cpu_env);
11083 store_reg(s, rd, tmp);
11086 if (extract32(insn, 5, 1) &&
11087 !arm_dc_feature(s, ARM_FEATURE_M)) {
11089 int sysm = extract32(insn, 16, 4) |
11090 (extract32(insn, 4, 1) << 4);
11092 gen_mrs_banked(s, 1, sysm, rd);
11097 /* Not accessible in user mode. */
11098 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
11102 if (extract32(insn, 16, 4) != 0xf ||
11103 extract32(insn, 0, 8) != 0) {
11107 tmp = load_cpu_field(spsr);
11108 store_reg(s, rd, tmp);
11113 /* Conditional branch. */
11114 op = (insn >> 22) & 0xf;
11115 /* Generate a conditional jump to next instruction. */
11116 s->condlabel = gen_new_label();
11117 arm_gen_test_cc(op ^ 1, s->condlabel);
11120 /* offset[11:1] = insn[10:0] */
11121 offset = (insn & 0x7ff) << 1;
11122 /* offset[17:12] = insn[21:16]. */
11123 offset |= (insn & 0x003f0000) >> 4;
11124 /* offset[31:20] = insn[26]. */
11125 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11126 /* offset[18] = insn[13]. */
11127 offset |= (insn & (1 << 13)) << 5;
11128 /* offset[19] = insn[11]. */
11129 offset |= (insn & (1 << 11)) << 8;
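/* Worked example (illustrative encoding): insn = 0xf0018001 (hw1 =
 * 0xf001, hw2 = 0x8001) is a conditional branch with cond = EQ, S = 0,
 * J1 = J2 = 0, imm6 = 1 and imm11 = 1.  The assembly above yields
 * offset = (1 << 12) | (1 << 1) = 0x1002, so the jump below goes to
 * s->pc + 0x1002, i.e. this insn's address + 4 + 0x1002.
 */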
11131 /* jump to the offset */
11132 gen_jmp(s, s->pc + offset);
11135 /* Data processing immediate. */
11136 if (insn & (1 << 25)) {
11137 if (insn & (1 << 24)) {
11138 if (insn & (1 << 20))
11140 /* Bitfield/Saturate. */
11141 op = (insn >> 21) & 7;
11143 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11145 tmp = tcg_temp_new_i32();
11146 tcg_gen_movi_i32(tmp, 0);
11148 tmp = load_reg(s, rn);
11151 case 2: /* Signed bitfield extract. */
11153 if (shift + imm > 32)
11156 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11159 case 6: /* Unsigned bitfield extract. */
11161 if (shift + imm > 32)
11164 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11167 case 3: /* Bitfield insert/clear. */
11170 imm = imm + 1 - shift;
11172 tmp2 = load_reg(s, rd);
11173 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
11174 tcg_temp_free_i32(tmp2);
11179 default: /* Saturate. */
11182 tcg_gen_sari_i32(tmp, tmp, shift);
11184 tcg_gen_shli_i32(tmp, tmp, shift);
11186 tmp2 = tcg_const_i32(imm);
11189 if ((op & 1) && shift == 0) {
11190 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11191 tcg_temp_free_i32(tmp);
11192 tcg_temp_free_i32(tmp2);
11195 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
11197 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
11201 if ((op & 1) && shift == 0) {
11202 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11203 tcg_temp_free_i32(tmp);
11204 tcg_temp_free_i32(tmp2);
11207 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
11209 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
11212 tcg_temp_free_i32(tmp2);
11215 store_reg(s, rd, tmp);
11217 imm = ((insn & 0x04000000) >> 15)
11218 | ((insn & 0x7000) >> 4) | (insn & 0xff);
11219 if (insn & (1 << 22)) {
11220 /* 16-bit immediate. */
11221 imm |= (insn >> 4) & 0xf000;
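/* Worked example (illustrative value): a 16-bit immediate of 0xabcd is
 * encoded as imm4 = 0xa, i = 1, imm3 = 0x3, imm8 = 0xcd.  The first step
 * above assembles i:imm3:imm8 into 0xbcd and this OR adds imm4 in bits
 * [15:12], giving 0xabcd, which MOVW moves to Rd (or which the MOVT path
 * below inserts into Rd[31:16]).
 */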
11222 if (insn & (1 << 23)) {
11224 tmp = load_reg(s, rd);
11225 tcg_gen_ext16u_i32(tmp, tmp);
11226 tcg_gen_ori_i32(tmp, tmp, imm << 16);
11229 tmp = tcg_temp_new_i32();
11230 tcg_gen_movi_i32(tmp, imm);
11233 /* Add/sub 12-bit immediate. */
11235 offset = s->pc & ~(uint32_t)3;
11236 if (insn & (1 << 23))
11240 tmp = tcg_temp_new_i32();
11241 tcg_gen_movi_i32(tmp, offset);
11243 tmp = load_reg(s, rn);
11244 if (insn & (1 << 23))
11245 tcg_gen_subi_i32(tmp, tmp, imm);
11247 tcg_gen_addi_i32(tmp, tmp, imm);
11250 store_reg(s, rd, tmp);
11253 int shifter_out = 0;
11254 /* modified 12-bit immediate. */
11255 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
11256 imm = (insn & 0xff);
11259 /* Nothing to do. */
11261 case 1: /* 00XY00XY */
11264 case 2: /* XY00XY00 */
11268 case 3: /* XYXYXYXY */
11272 default: /* Rotated constant. */
11273 shift = (shift << 1) | (imm >> 7);
11275 imm = imm << (32 - shift);
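/* Worked example (illustrative values): with i:imm3 = 0b0001 and
 * imm8 = 0xab the constant is the replicated pattern 0x00ab00ab (the
 * 00XY00XY case above).  With i:imm3 = 0b0100 and imm8 = 0xab we take
 * this rotated-constant path instead: shift becomes (4 << 1) | 1 = 9 and
 * the result is ror(0xab, 9) = 0x55800000, with shifter_out set so a
 * flag-setting logical op takes its carry from bit 31 of the constant.
 */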
11279 tmp2 = tcg_temp_new_i32();
11280 tcg_gen_movi_i32(tmp2, imm);
11281 rn = (insn >> 16) & 0xf;
11283 tmp = tcg_temp_new_i32();
11284 tcg_gen_movi_i32(tmp, 0);
11286 tmp = load_reg(s, rn);
11288 op = (insn >> 21) & 0xf;
11289 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
11290 shifter_out, tmp, tmp2))
11292 tcg_temp_free_i32(tmp2);
11293 rd = (insn >> 8) & 0xf;
11295 store_reg(s, rd, tmp);
11297 tcg_temp_free_i32(tmp);
11302 case 12: /* Load/store single data item. */
11309 if ((insn & 0x01100000) == 0x01000000) {
11310 if (disas_neon_ls_insn(s, insn)) {
11315 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11317 if (!(insn & (1 << 20))) {
11321 /* Byte or halfword load space with dest == r15 : memory hints.
11322 * Catch them early so we don't emit pointless addressing code.
11323 * This space is a mix of:
11324 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11325 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP cores)
11327 * unallocated hints, which must be treated as NOPs
11328 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11329 * which is easiest for the decoding logic
11330 * Some space which must UNDEF
11332 int op1 = (insn >> 23) & 3;
11333 int op2 = (insn >> 6) & 0x3f;
11338 /* UNPREDICTABLE, unallocated hint or
11339 * PLD/PLDW/PLI (literal)
11344 return; /* PLD/PLDW/PLI or unallocated hint */
11346 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
11347 return; /* PLD/PLDW/PLI or unallocated hint */
11349 /* UNDEF space, or an UNPREDICTABLE */
11353 memidx = get_mem_index(s);
11355 addr = tcg_temp_new_i32();
11357 /* s->pc has already been incremented by 4. */
11358 imm = s->pc & 0xfffffffc;
11359 if (insn & (1 << 23))
11360 imm += insn & 0xfff;
11362 imm -= insn & 0xfff;
11363 tcg_gen_movi_i32(addr, imm);
11365 addr = load_reg(s, rn);
11366 if (insn & (1 << 23)) {
11367 /* Positive offset. */
11368 imm = insn & 0xfff;
11369 tcg_gen_addi_i32(addr, addr, imm);
11372 switch ((insn >> 8) & 0xf) {
11373 case 0x0: /* Shifted Register. */
11374 shift = (insn >> 4) & 0xf;
11376 tcg_temp_free_i32(addr);
11379 tmp = load_reg(s, rm);
11381 tcg_gen_shli_i32(tmp, tmp, shift);
11382 tcg_gen_add_i32(addr, addr, tmp);
11383 tcg_temp_free_i32(tmp);
11385 case 0xc: /* Negative offset. */
11386 tcg_gen_addi_i32(addr, addr, -imm);
11388 case 0xe: /* User privilege. */
11389 tcg_gen_addi_i32(addr, addr, imm);
11390 memidx = get_a32_user_mem_index(s);
11392 case 0x9: /* Post-decrement. */
11394 /* Fall through. */
11395 case 0xb: /* Post-increment. */
11399 case 0xd: /* Pre-decrement. */
11401 /* Fall through. */
11402 case 0xf: /* Pre-increment. */
11403 tcg_gen_addi_i32(addr, addr, imm);
11407 tcg_temp_free_i32(addr);
11413 issinfo = writeback ? ISSInvalid : rs;
11415 if (insn & (1 << 20)) {
11417 tmp = tcg_temp_new_i32();
11420 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
11423 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
11426 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
11429 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
11432 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
11435 tcg_temp_free_i32(tmp);
11436 tcg_temp_free_i32(addr);
11440 gen_bx_excret(s, tmp);
11442 store_reg(s, rs, tmp);
11446 tmp = load_reg(s, rs);
11449 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
11452 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
11455 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
11458 tcg_temp_free_i32(tmp);
11459 tcg_temp_free_i32(addr);
11462 tcg_temp_free_i32(tmp);
11465 tcg_gen_addi_i32(addr, addr, imm);
11467 store_reg(s, rn, addr);
11469 tcg_temp_free_i32(addr);
11478 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11479 default_exception_el(s));
11482 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
11484 uint32_t val, op, rm, rn, rd, shift, cond;
11491 switch (insn >> 12) {
11495 op = (insn >> 11) & 3;
11498 rn = (insn >> 3) & 7;
11499 tmp = load_reg(s, rn);
11500 if (insn & (1 << 10)) {
11502 tmp2 = tcg_temp_new_i32();
11503 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
11506 rm = (insn >> 6) & 7;
11507 tmp2 = load_reg(s, rm);
11509 if (insn & (1 << 9)) {
11510 if (s->condexec_mask)
11511 tcg_gen_sub_i32(tmp, tmp, tmp2);
11513 gen_sub_CC(tmp, tmp, tmp2);
11515 if (s->condexec_mask)
11516 tcg_gen_add_i32(tmp, tmp, tmp2);
11518 gen_add_CC(tmp, tmp, tmp2);
11520 tcg_temp_free_i32(tmp2);
11521 store_reg(s, rd, tmp);
11523 /* shift immediate */
11524 rm = (insn >> 3) & 7;
11525 shift = (insn >> 6) & 0x1f;
11526 tmp = load_reg(s, rm);
11527 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11528 if (!s->condexec_mask)
11530 store_reg(s, rd, tmp);
11534 /* arithmetic large immediate */
11535 op = (insn >> 11) & 3;
11536 rd = (insn >> 8) & 0x7;
11537 if (op == 0) { /* mov */
11538 tmp = tcg_temp_new_i32();
11539 tcg_gen_movi_i32(tmp, insn & 0xff);
11540 if (!s->condexec_mask)
11542 store_reg(s, rd, tmp);
11544 tmp = load_reg(s, rd);
11545 tmp2 = tcg_temp_new_i32();
11546 tcg_gen_movi_i32(tmp2, insn & 0xff);
11549 gen_sub_CC(tmp, tmp, tmp2);
11550 tcg_temp_free_i32(tmp);
11551 tcg_temp_free_i32(tmp2);
11554 if (s->condexec_mask)
11555 tcg_gen_add_i32(tmp, tmp, tmp2);
11557 gen_add_CC(tmp, tmp, tmp2);
11558 tcg_temp_free_i32(tmp2);
11559 store_reg(s, rd, tmp);
11562 if (s->condexec_mask)
11563 tcg_gen_sub_i32(tmp, tmp, tmp2);
11565 gen_sub_CC(tmp, tmp, tmp2);
11566 tcg_temp_free_i32(tmp2);
11567 store_reg(s, rd, tmp);
11573 if (insn & (1 << 11)) {
11574 rd = (insn >> 8) & 7;
11575 /* load pc-relative. Bit 1 of PC is ignored. */
11576 val = s->pc + 2 + ((insn & 0xff) * 4);
11577 val &= ~(uint32_t)2;
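/* Worked example (illustrative addresses): for this encoding at address
 * 0x1002 with imm8 = 1, s->pc is 0x1004, so val = 0x1004 + 2 + 4 = 0x100a
 * and clearing bit 1 gives 0x1008.  That equals Align(PC, 4) + 4 with the
 * architectural PC of 0x1006, which is the LDR (literal) address.
 */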
11578 addr = tcg_temp_new_i32();
11579 tcg_gen_movi_i32(addr, val);
11580 tmp = tcg_temp_new_i32();
11581 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11583 tcg_temp_free_i32(addr);
11584 store_reg(s, rd, tmp);
11587 if (insn & (1 << 10)) {
11588 /* 0b0100_01xx_xxxx_xxxx
11589 * - data processing extended, branch and exchange
11591 rd = (insn & 7) | ((insn >> 4) & 8);
11592 rm = (insn >> 3) & 0xf;
11593 op = (insn >> 8) & 3;
11596 tmp = load_reg(s, rd);
11597 tmp2 = load_reg(s, rm);
11598 tcg_gen_add_i32(tmp, tmp, tmp2);
11599 tcg_temp_free_i32(tmp2);
11600 store_reg(s, rd, tmp);
11603 tmp = load_reg(s, rd);
11604 tmp2 = load_reg(s, rm);
11605 gen_sub_CC(tmp, tmp, tmp2);
11606 tcg_temp_free_i32(tmp2);
11607 tcg_temp_free_i32(tmp);
11609 case 2: /* mov/cpy */
11610 tmp = load_reg(s, rm);
11611 store_reg(s, rd, tmp);
11615 /* 0b0100_0111_xxxx_xxxx
11616 * - branch [and link] exchange thumb register
11618 bool link = insn & (1 << 7);
11627 /* BXNS/BLXNS: only exists for v8M with the
11628 * security extensions, and always UNDEF if NonSecure.
11629 * We don't implement these in the user-only mode
11630 * either (in theory you can use them from Secure User
11631 * mode but they are too tied in to system emulation.)
11633 if (!s->v8m_secure || IS_USER_ONLY) {
11644 tmp = load_reg(s, rm);
11646 val = (uint32_t)s->pc | 1;
11647 tmp2 = tcg_temp_new_i32();
11648 tcg_gen_movi_i32(tmp2, val);
11649 store_reg(s, 14, tmp2);
11652 /* Only BX works as exception-return, not BLX */
11653 gen_bx_excret(s, tmp);
11661 /* data processing register */
11663 rm = (insn >> 3) & 7;
11664 op = (insn >> 6) & 0xf;
11665 if (op == 2 || op == 3 || op == 4 || op == 7) {
11666 /* the shift/rotate ops want the operands backwards */
11675 if (op == 9) { /* neg */
11676 tmp = tcg_temp_new_i32();
11677 tcg_gen_movi_i32(tmp, 0);
11678 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11679 tmp = load_reg(s, rd);
11684 tmp2 = load_reg(s, rm);
11686 case 0x0: /* and */
11687 tcg_gen_and_i32(tmp, tmp, tmp2);
11688 if (!s->condexec_mask)
11691 case 0x1: /* eor */
11692 tcg_gen_xor_i32(tmp, tmp, tmp2);
11693 if (!s->condexec_mask)
11696 case 0x2: /* lsl */
11697 if (s->condexec_mask) {
11698 gen_shl(tmp2, tmp2, tmp);
11700 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11701 gen_logic_CC(tmp2);
11704 case 0x3: /* lsr */
11705 if (s->condexec_mask) {
11706 gen_shr(tmp2, tmp2, tmp);
11708 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11709 gen_logic_CC(tmp2);
11712 case 0x4: /* asr */
11713 if (s->condexec_mask) {
11714 gen_sar(tmp2, tmp2, tmp);
11716 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11717 gen_logic_CC(tmp2);
11720 case 0x5: /* adc */
11721 if (s->condexec_mask) {
11722 gen_adc(tmp, tmp2);
11724 gen_adc_CC(tmp, tmp, tmp2);
11727 case 0x6: /* sbc */
11728 if (s->condexec_mask) {
11729 gen_sub_carry(tmp, tmp, tmp2);
11731 gen_sbc_CC(tmp, tmp, tmp2);
11734 case 0x7: /* ror */
11735 if (s->condexec_mask) {
11736 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11737 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11739 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11740 gen_logic_CC(tmp2);
11743 case 0x8: /* tst */
11744 tcg_gen_and_i32(tmp, tmp, tmp2);
11748 case 0x9: /* neg */
11749 if (s->condexec_mask)
11750 tcg_gen_neg_i32(tmp, tmp2);
11752 gen_sub_CC(tmp, tmp, tmp2);
11754 case 0xa: /* cmp */
11755 gen_sub_CC(tmp, tmp, tmp2);
11758 case 0xb: /* cmn */
11759 gen_add_CC(tmp, tmp, tmp2);
11762 case 0xc: /* orr */
11763 tcg_gen_or_i32(tmp, tmp, tmp2);
11764 if (!s->condexec_mask)
11767 case 0xd: /* mul */
11768 tcg_gen_mul_i32(tmp, tmp, tmp2);
11769 if (!s->condexec_mask)
11772 case 0xe: /* bic */
11773 tcg_gen_andc_i32(tmp, tmp, tmp2);
11774 if (!s->condexec_mask)
11777 case 0xf: /* mvn */
11778 tcg_gen_not_i32(tmp2, tmp2);
11779 if (!s->condexec_mask)
11780 gen_logic_CC(tmp2);
11787 store_reg(s, rm, tmp2);
11789 tcg_temp_free_i32(tmp);
11791 store_reg(s, rd, tmp);
11792 tcg_temp_free_i32(tmp2);
11795 tcg_temp_free_i32(tmp);
11796 tcg_temp_free_i32(tmp2);
11801 /* load/store register offset. */
11803 rn = (insn >> 3) & 7;
11804 rm = (insn >> 6) & 7;
11805 op = (insn >> 9) & 7;
11806 addr = load_reg(s, rn);
11807 tmp = load_reg(s, rm);
11808 tcg_gen_add_i32(addr, addr, tmp);
11809 tcg_temp_free_i32(tmp);
11811 if (op < 3) { /* store */
11812 tmp = load_reg(s, rd);
11814 tmp = tcg_temp_new_i32();
11819 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11822 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11825 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11827 case 3: /* ldrsb */
11828 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11831 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11834 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11837 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11839 case 7: /* ldrsh */
11840 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11843 if (op >= 3) { /* load */
11844 store_reg(s, rd, tmp);
11846 tcg_temp_free_i32(tmp);
11848 tcg_temp_free_i32(addr);
11852 /* load/store word immediate offset */
11854 rn = (insn >> 3) & 7;
11855 addr = load_reg(s, rn);
11856 val = (insn >> 4) & 0x7c;
11857 tcg_gen_addi_i32(addr, addr, val);
11859 if (insn & (1 << 11)) {
11861 tmp = tcg_temp_new_i32();
11862 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11863 store_reg(s, rd, tmp);
11866 tmp = load_reg(s, rd);
11867 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11868 tcg_temp_free_i32(tmp);
11870 tcg_temp_free_i32(addr);
11874 /* load/store byte immediate offset */
11876 rn = (insn >> 3) & 7;
11877 addr = load_reg(s, rn);
11878 val = (insn >> 6) & 0x1f;
11879 tcg_gen_addi_i32(addr, addr, val);
11881 if (insn & (1 << 11)) {
11883 tmp = tcg_temp_new_i32();
11884 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11885 store_reg(s, rd, tmp);
11888 tmp = load_reg(s, rd);
11889 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11890 tcg_temp_free_i32(tmp);
11892 tcg_temp_free_i32(addr);
11896 /* load/store halfword immediate offset */
11898 rn = (insn >> 3) & 7;
11899 addr = load_reg(s, rn);
11900 val = (insn >> 5) & 0x3e;
11901 tcg_gen_addi_i32(addr, addr, val);
11903 if (insn & (1 << 11)) {
11905 tmp = tcg_temp_new_i32();
11906 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11907 store_reg(s, rd, tmp);
11910 tmp = load_reg(s, rd);
11911 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11912 tcg_temp_free_i32(tmp);
11914 tcg_temp_free_i32(addr);
11918 /* load/store from stack */
11919 rd = (insn >> 8) & 7;
11920 addr = load_reg(s, 13);
11921 val = (insn & 0xff) * 4;
11922 tcg_gen_addi_i32(addr, addr, val);
11924 if (insn & (1 << 11)) {
11926 tmp = tcg_temp_new_i32();
11927 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11928 store_reg(s, rd, tmp);
11931 tmp = load_reg(s, rd);
11932 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11933 tcg_temp_free_i32(tmp);
11935 tcg_temp_free_i32(addr);
11939 /* add to high reg */
11940 rd = (insn >> 8) & 7;
11941 if (insn & (1 << 11)) {
11943 tmp = load_reg(s, 13);
11945 /* PC. bit 1 is ignored. */
11946 tmp = tcg_temp_new_i32();
11947 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
11949 val = (insn & 0xff) * 4;
11950 tcg_gen_addi_i32(tmp, tmp, val);
11951 store_reg(s, rd, tmp);
11956 op = (insn >> 8) & 0xf;
11959 /* adjust stack pointer */
11960 tmp = load_reg(s, 13);
11961 val = (insn & 0x7f) * 4;
11962 if (insn & (1 << 7))
11963 val = -(int32_t)val;
11964 tcg_gen_addi_i32(tmp, tmp, val);
11965 store_reg(s, 13, tmp);
11968 case 2: /* sign/zero extend. */
11971 rm = (insn >> 3) & 7;
11972 tmp = load_reg(s, rm);
11973 switch ((insn >> 6) & 3) {
11974 case 0: gen_sxth(tmp); break;
11975 case 1: gen_sxtb(tmp); break;
11976 case 2: gen_uxth(tmp); break;
11977 case 3: gen_uxtb(tmp); break;
11979 store_reg(s, rd, tmp);
11981 case 4: case 5: case 0xc: case 0xd:
11983 addr = load_reg(s, 13);
11984 if (insn & (1 << 8))
11988 for (i = 0; i < 8; i++) {
11989 if (insn & (1 << i))
11992 if ((insn & (1 << 11)) == 0) {
11993 tcg_gen_addi_i32(addr, addr, -offset);
11995 for (i = 0; i < 8; i++) {
11996 if (insn & (1 << i)) {
11997 if (insn & (1 << 11)) {
11999 tmp = tcg_temp_new_i32();
12000 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12001 store_reg(s, i, tmp);
12004 tmp = load_reg(s, i);
12005 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12006 tcg_temp_free_i32(tmp);
12008 /* advance to the next address. */
12009 tcg_gen_addi_i32(addr, addr, 4);
12013 if (insn & (1 << 8)) {
12014 if (insn & (1 << 11)) {
12016 tmp = tcg_temp_new_i32();
12017 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12018 /* don't set the pc until the rest of the instruction has finished */
12022 tmp = load_reg(s, 14);
12023 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12024 tcg_temp_free_i32(tmp);
12026 tcg_gen_addi_i32(addr, addr, 4);
12028 if ((insn & (1 << 11)) == 0) {
12029 tcg_gen_addi_i32(addr, addr, -offset);
12031 /* write back the new stack pointer */
12032 store_reg(s, 13, addr);
12033 /* set the new PC value */
12034 if ((insn & 0x0900) == 0x0900) {
12035 store_reg_from_load(s, 15, tmp);
12039 case 1: case 3: case 9: case 11: /* cbz/cbnz */
12041 tmp = load_reg(s, rm);
12042 s->condlabel = gen_new_label();
12044 if (insn & (1 << 11))
12045 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
12047 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
12048 tcg_temp_free_i32(tmp);
12049 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
12050 val = (uint32_t)s->pc + 2;
12055 case 15: /* IT, nop-hint. */
12056 if ((insn & 0xf) == 0) {
12057 gen_nop_hint(s, (insn >> 4) & 0xf);
12061 s->condexec_cond = (insn >> 4) & 0xe;
12062 s->condexec_mask = insn & 0x1f;
12063 /* No actual code generated for this insn, just setup state. */
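/* Worked example (illustrative encoding): ITTE EQ is 0xbf06, i.e.
 * firstcond = 0x0 and mask = 0b0110.  After this decode condexec_cond
 * holds firstcond[3:1] (0x0) and condexec_mask holds firstcond[0]:mask
 * (0b00110); the low bit of the current condition is recovered from the
 * top bit of the mask each time the IT state is advanced in
 * thumb_tr_translate_insn.
 */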
12066 case 0xe: /* bkpt */
12068 int imm8 = extract32(insn, 0, 8);
12070 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
12074 case 0xa: /* rev, and hlt */
12076 int op1 = extract32(insn, 6, 2);
12080 int imm6 = extract32(insn, 0, 6);
12086 /* Otherwise this is rev */
12088 rn = (insn >> 3) & 0x7;
12090 tmp = load_reg(s, rn);
12092 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
12093 case 1: gen_rev16(tmp); break;
12094 case 3: gen_revsh(tmp); break;
12096 g_assert_not_reached();
12098 store_reg(s, rd, tmp);
12103 switch ((insn >> 5) & 7) {
12107 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12108 gen_helper_setend(cpu_env);
12109 s->base.is_jmp = DISAS_UPDATE;
12118 if (arm_dc_feature(s, ARM_FEATURE_M)) {
12119 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
12122 addr = tcg_const_i32(19);
12123 gen_helper_v7m_msr(cpu_env, addr, tmp);
12124 tcg_temp_free_i32(addr);
12128 addr = tcg_const_i32(16);
12129 gen_helper_v7m_msr(cpu_env, addr, tmp);
12130 tcg_temp_free_i32(addr);
12132 tcg_temp_free_i32(tmp);
12135 if (insn & (1 << 4)) {
12136 shift = CPSR_A | CPSR_I | CPSR_F;
12140 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
12155 /* load/store multiple */
12156 TCGv_i32 loaded_var = NULL;
12157 rn = (insn >> 8) & 0x7;
12158 addr = load_reg(s, rn);
12159 for (i = 0; i < 8; i++) {
12160 if (insn & (1 << i)) {
12161 if (insn & (1 << 11)) {
12163 tmp = tcg_temp_new_i32();
12164 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12168 store_reg(s, i, tmp);
12172 tmp = load_reg(s, i);
12173 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12174 tcg_temp_free_i32(tmp);
12176 /* advance to the next address */
12177 tcg_gen_addi_i32(addr, addr, 4);
12180 if ((insn & (1 << rn)) == 0) {
12181 /* base reg not in list: base register writeback */
12182 store_reg(s, rn, addr);
12184 /* base reg in list: if load, complete it now */
12185 if (insn & (1 << 11)) {
12186 store_reg(s, rn, loaded_var);
12188 tcg_temp_free_i32(addr);
12193 /* conditional branch or swi */
12194 cond = (insn >> 8) & 0xf;
12200 gen_set_pc_im(s, s->pc);
12201 s->svc_imm = extract32(insn, 0, 8);
12202 s->base.is_jmp = DISAS_SWI;
12205 /* generate a conditional jump to next instruction */
12206 s->condlabel = gen_new_label();
12207 arm_gen_test_cc(cond ^ 1, s->condlabel);
12210 /* jump to the offset */
12211 val = (uint32_t)s->pc + 2;
12212 offset = ((int32_t)insn << 24) >> 24;
12213 val += offset << 1;
12218 if (insn & (1 << 11)) {
12219 /* thumb_insn_is_16bit() ensures we can't get here for
12220 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12221 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12223 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12225 offset = ((insn & 0x7ff) << 1);
12226 tmp = load_reg(s, 14);
12227 tcg_gen_addi_i32(tmp, tmp, offset);
12228 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12230 tmp2 = tcg_temp_new_i32();
12231 tcg_gen_movi_i32(tmp2, s->pc | 1);
12232 store_reg(s, 14, tmp2);
12236 /* unconditional branch */
12237 val = (uint32_t)s->pc;
12238 offset = ((int32_t)insn << 21) >> 21;
12239 val += (offset << 1) + 2;
12244 /* thumb_insn_is_16bit() ensures we can't get here for
12245 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12247 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12249 if (insn & (1 << 11)) {
12250 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12251 offset = ((insn & 0x7ff) << 1) | 1;
12252 tmp = load_reg(s, 14);
12253 tcg_gen_addi_i32(tmp, tmp, offset);
12255 tmp2 = tcg_temp_new_i32();
12256 tcg_gen_movi_i32(tmp2, s->pc | 1);
12257 store_reg(s, 14, tmp2);
12260 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
12261 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
12263 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
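/* Worked example (illustrative encoding): a prefix of 0xf001 has
 * imm11 = 1, so uoffset = 0x1000 and r14 is loaded with this halfword's
 * address + 4 + 0x1000.  The BL/BLX suffix, handled earlier in this
 * function, then adds its own (imm11 << 1) to r14 to form the final
 * target and replaces r14 with the return address (address of the
 * suffix + 2, with bit 0 set).
 */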
12270 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
12271 default_exception_el(s));
12274 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
12276 /* Return true if the insn at dc->pc might cross a page boundary.
12277 * (False positives are OK, false negatives are not.)
12278 * We know this is a Thumb insn, and our caller ensures we are
12279 * only called if dc->pc is less than 4 bytes from the page
12280 * boundary, so we cross the page if the first 16 bits indicate
12281 * that this is a 32 bit insn.
12283 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
12285 return !thumb_insn_is_16bit(s, insn);
12288 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
12290 DisasContext *dc = container_of(dcbase, DisasContext, base);
12291 CPUARMState *env = cs->env_ptr;
12292 ARMCPU *cpu = arm_env_get_cpu(env);
12294 dc->pc = dc->base.pc_first;
12298 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
12299 * there is no secure EL1, so we route exceptions to EL3.
12301 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
12302 !arm_el_is_aa64(env, 3);
12303 dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
12304 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
12305 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
12306 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
12307 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
12308 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
12309 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
12310 #if !defined(CONFIG_USER_ONLY)
12311 dc->user = (dc->current_el == 0);
12313 dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
12314 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
12315 dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
12316 dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
12317 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
12318 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
12319 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
12320 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12321 regime_is_secure(env, dc->mmu_idx);
12322 dc->cp_regs = cpu->cp_regs;
12323 dc->features = env->features;
12325 /* Single step state. The code-generation logic here is:
12327 * SS_ACTIVE == 0: generate code with no special handling for single-stepping (except
12328 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
12329 * this happens anyway because those changes are all system register or PSTATE writes).
12331 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
12332 * emit code for one insn
12333 * emit code to clear PSTATE.SS
12334 * emit code to generate software step exception for completed step
12335 * end TB (as usual for having generated an exception)
12336 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
12337 * emit code to generate a software step exception and end the TB
12340 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
12341 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
12342 dc->is_ldex = false;
12343 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
12345 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
12347 /* If architectural single step active, limit to 1. */
12348 if (is_singlestepping(dc)) {
12349 dc->base.max_insns = 1;
12352 /* ARM is a fixed-length ISA. Bound the number of insns to execute
12353 to those left on the page. */
12355 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
12356 dc->base.max_insns = MIN(dc->base.max_insns, bound);
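/* Illustrative arithmetic (assuming 4K pages and a 32-bit target_ulong):
 * if pc_first is 0x1f40 then pc_first | TARGET_PAGE_MASK is 0xffffff40,
 * so bound = -(0xffffff40) / 4 = 0xc0 / 4 = 48, exactly the number of
 * 4-byte ARM insns left before the page boundary at 0x2000.
 */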
12359 cpu_F0s = tcg_temp_new_i32();
12360 cpu_F1s = tcg_temp_new_i32();
12361 cpu_F0d = tcg_temp_new_i64();
12362 cpu_F1d = tcg_temp_new_i64();
12365 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
12366 cpu_M0 = tcg_temp_new_i64();
12369 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
12371 DisasContext *dc = container_of(dcbase, DisasContext, base);
12373 /* A note on handling of the condexec (IT) bits:
12375 * We want to avoid the overhead of having to write the updated condexec
12376 * bits back to the CPUARMState for every instruction in an IT block. So:
12377 * (1) if the condexec bits are not already zero then we write
12378 * zero back into the CPUARMState now. This avoids complications trying
12379 * to do it at the end of the block. (For example if we don't do this
12380 * it's hard to identify whether we can safely skip writing condexec
12381 * at the end of the TB, which we definitely want to do for the case
12382 * where a TB doesn't do anything with the IT state at all.)
12383 * (2) if we are going to leave the TB then we call gen_set_condexec()
12384 * which will write the correct value into CPUARMState if zero is wrong.
12385 * This is done both for leaving the TB at the end, and for leaving
12386 * it because of an exception we know will happen, which is done in
12387 * gen_exception_insn(). The latter is necessary because we need to
12388 * leave the TB with the PC/IT state just prior to execution of the
12389 * instruction which caused the exception.
12390 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12391 * then the CPUARMState will be wrong and we need to reset it.
12392 * This is handled in the same way as restoration of the
12393 * PC in these situations; we save the value of the condexec bits
12394 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12395 * then uses this to restore them after an exception.
12397 * Note that there are no instructions which can read the condexec
12398 * bits, and none which can write non-static values to them, so
12399 * we don't need to care about whether CPUARMState is correct in the
12403 /* Reset the conditional execution bits immediately. This avoids
12404 complications trying to do it at the end of the block. */
12405 if (dc->condexec_mask || dc->condexec_cond) {
12406 TCGv_i32 tmp = tcg_temp_new_i32();
12407 tcg_gen_movi_i32(tmp, 0);
12408 store_cpu_field(tmp, condexec_bits);
12410 tcg_clear_temp_count();
12413 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12415 DisasContext *dc = container_of(dcbase, DisasContext, base);
12417 tcg_gen_insn_start(dc->pc,
12418 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12420 dc->insn_start = tcg_last_op();
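/* Note: the per-insn values recorded here -- the pc, the packed condexec
 * bits and the (initially zero) syndrome slot -- are what
 * restore_state_to_opc() at the end of this file reads back as data[0],
 * data[1] and data[2] when unwinding to a particular instruction.
 */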
12423 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12424 const CPUBreakpoint *bp)
12426 DisasContext *dc = container_of(dcbase, DisasContext, base);
12428 if (bp->flags & BP_CPU) {
12429 gen_set_condexec(dc);
12430 gen_set_pc_im(dc, dc->pc);
12431 gen_helper_check_breakpoints(cpu_env);
12432 /* End the TB early; it's likely not going to be executed */
12433 dc->base.is_jmp = DISAS_TOO_MANY;
12435 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12436 /* The address covered by the breakpoint must be
12437 included in [tb->pc, tb->pc + tb->size) in order
12438 for it to be properly cleared -- thus we
12439 increment the PC here so that the logic setting
12440 tb->size below does the right thing. */
12441 /* TODO: Advance PC by correct instruction length to
12442 * avoid disassembler error messages */
12444 dc->base.is_jmp = DISAS_NORETURN;
12450 static bool arm_pre_translate_insn(DisasContext *dc)
12452 #ifdef CONFIG_USER_ONLY
12453 /* Intercept jump to the magic kernel page. */
12454 if (dc->pc >= 0xffff0000) {
12455 /* We always get here via a jump, so know we are not in a
12456 conditional execution block. */
12457 gen_exception_internal(EXCP_KERNEL_TRAP);
12458 dc->base.is_jmp = DISAS_NORETURN;
12463 if (dc->ss_active && !dc->pstate_ss) {
12464 /* Singlestep state is Active-pending.
12465 * If we're in this state at the start of a TB then either
12466 * a) we just took an exception to an EL which is being debugged
12467 * and this is the first insn in the exception handler
12468 * b) debug exceptions were masked and we just unmasked them
12469 * without changing EL (eg by clearing PSTATE.D)
12470 * In either case we're going to take a swstep exception in the
12471 * "did not step an insn" case, and so the syndrome ISV and EX
12472 * bits should be zero.
12474 assert(dc->base.num_insns == 1);
12475 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12476 default_exception_el(dc));
12477 dc->base.is_jmp = DISAS_NORETURN;
12484 static void arm_post_translate_insn(DisasContext *dc)
12486 if (dc->condjmp && !dc->base.is_jmp) {
12487 gen_set_label(dc->condlabel);
12490 dc->base.pc_next = dc->pc;
12491 translator_loop_temp_check(&dc->base);
12494 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12496 DisasContext *dc = container_of(dcbase, DisasContext, base);
12497 CPUARMState *env = cpu->env_ptr;
12500 if (arm_pre_translate_insn(dc)) {
12504 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
12507 disas_arm_insn(dc, insn);
12509 arm_post_translate_insn(dc);
12511 /* ARM is a fixed-length ISA. We performed the cross-page check
12512 in init_disas_context by adjusting max_insns. */
12515 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12517 /* Return true if this Thumb insn is always unconditional,
12518 * even inside an IT block. This is true of only a very few
12519 * instructions: BKPT, HLT, and SG.
12521 * A larger class of instructions are UNPREDICTABLE if used
12522 * inside an IT block; we do not need to detect those here, because
12523 * what we do by default (perform the cc check and update the IT
12524 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12525 * choice for those situations.
12527 * insn is either a 16-bit or a 32-bit instruction; the two are
12528 * distinguishable because for the 16-bit case the top 16 bits
12529 * are zeroes, and that isn't a valid 32-bit encoding.
12531 if ((insn & 0xffffff00) == 0xbe00) {
12536 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12537 !arm_dc_feature(s, ARM_FEATURE_M)) {
12538 /* HLT: v8A only. This is unconditional even when it is going to
12539 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12540 * For v7 cores this was a plain old undefined encoding and so
12541 * honours its cc check. (We might be using the encoding as
12542 * a semihosting trap, but we don't change the cc check behaviour
12543 * on that account, because a debugger connected to a real v7A
12544 * core and emulating semihosting traps by catching the UNDEF
12545 * exception would also only see cases where the cc check passed.
12546 * No guest code should be trying to do a HLT semihosting trap
12547 * in an IT block anyway.
12552 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12553 arm_dc_feature(s, ARM_FEATURE_M)) {
12561 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12563 DisasContext *dc = container_of(dcbase, DisasContext, base);
12564 CPUARMState *env = cpu->env_ptr;
12568 if (arm_pre_translate_insn(dc)) {
12572 insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
12573 is_16bit = thumb_insn_is_16bit(dc, insn);
12576 uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
12578 insn = insn << 16 | insn2;
12583 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
12584 uint32_t cond = dc->condexec_cond;
12586 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
12587 dc->condlabel = gen_new_label();
12588 arm_gen_test_cc(cond ^ 1, dc->condlabel);
12594 disas_thumb_insn(dc, insn);
12596 disas_thumb2_insn(dc, insn);
12599 /* Advance the Thumb condexec condition. */
12600 if (dc->condexec_mask) {
12601 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12602 ((dc->condexec_mask >> 4) & 1));
12603 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12604 if (dc->condexec_mask == 0) {
12605 dc->condexec_cond = 0;
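/* Worked example (illustrative): for an ITTE EQ block (IT insn 0xbf06,
 * so cond = 0x0 and mask = 0b00110 after decode), the advance above runs
 * once for the IT insn itself, giving mask = 0b01100; the first two insns
 * of the block then test EQ and leave mask = 0b11000 and 0b10000, which
 * pulls a 1 into the condition for the third insn (NE, the E slot); after
 * that the mask shifts to zero and condexec_cond is cleared just above,
 * ending the block.
 */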
12609 arm_post_translate_insn(dc);
12611 /* Thumb is a variable-length ISA. Stop translation when the next insn
12612 * will touch a new page. This ensures that prefetch aborts occur at
12615 * We want to stop the TB if the next insn starts in a new page,
12616 * or if it spans between this page and the next. This means that
12617 * if we're looking at the last halfword in the page we need to
12618 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12619 * or a 32-bit Thumb insn (which won't).
12620 * This is to avoid generating a silly TB with a single 16-bit insn
12621 * in it at the end of this page (which would execute correctly
12622 * but isn't very efficient).
12624 if (dc->base.is_jmp == DISAS_NEXT
12625 && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
12626 || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
12627 && insn_crosses_page(env, dc)))) {
12628 dc->base.is_jmp = DISAS_TOO_MANY;
12632 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
12634 DisasContext *dc = container_of(dcbase, DisasContext, base);
12636 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
12637 /* FIXME: This can theoretically happen with self-modifying code. */
12638 cpu_abort(cpu, "IO on conditional branch instruction");
12641 /* At this stage dc->condjmp will only be set when the skipped
12642 instruction was a conditional branch or trap, and the PC has
12643 already been written. */
12644 gen_set_condexec(dc);
12645 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
12646 /* Exception return branches need some special case code at the
12647 * end of the TB, which is complex enough that it has to
12648 * handle the single-step vs not and the condition-failed
12649 * insn codepath itself.
12651 gen_bx_excret_final_code(dc);
12652 } else if (unlikely(is_singlestepping(dc))) {
12653 /* Unconditional and "condition passed" instruction codepath. */
12654 switch (dc->base.is_jmp) {
12656 gen_ss_advance(dc);
12657 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12658 default_exception_el(dc));
12661 gen_ss_advance(dc);
12662 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12665 gen_ss_advance(dc);
12666 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12669 case DISAS_TOO_MANY:
12671 gen_set_pc_im(dc, dc->pc);
12674 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12675 gen_singlestep_exception(dc);
12677 case DISAS_NORETURN:
12681 /* While branches must always occur at the end of an IT block,
12682 there are a few other things that can cause us to terminate
12683 the TB in the middle of an IT block:
12684 - Exception generating instructions (bkpt, swi, undefined).
12685 - Page boundaries.
12686 - Hardware watchpoints.
12687 Hardware breakpoints have already been handled and skip this code.
12689 switch (dc->base.is_jmp) {
12691 case DISAS_TOO_MANY:
12692 gen_goto_tb(dc, 1, dc->pc);
12698 gen_set_pc_im(dc, dc->pc);
12701 /* indicate that the hash table must be used to find the next TB */
12702 tcg_gen_exit_tb(0);
12704 case DISAS_NORETURN:
12705 /* nothing more to generate */
12709 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
12710 !(dc->insn & (1U << 31))) ? 2 : 4);
12712 gen_helper_wfi(cpu_env, tmp);
12713 tcg_temp_free_i32(tmp);
12714 /* The helper doesn't necessarily throw an exception, but we
12715 * must go back to the main loop to check for interrupts anyway.
12717 tcg_gen_exit_tb(0);
12721 gen_helper_wfe(cpu_env);
12724 gen_helper_yield(cpu_env);
12727 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12728 default_exception_el(dc));
12731 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12734 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12740 /* "Condition failed" instruction codepath for the branch/trap insn */
12741 gen_set_label(dc->condlabel);
12742 gen_set_condexec(dc);
12743 if (unlikely(is_singlestepping(dc))) {
12744 gen_set_pc_im(dc, dc->pc);
12745 gen_singlestep_exception(dc);
12747 gen_goto_tb(dc, 1, dc->pc);
12751 /* Functions above can change dc->pc, so re-align db->pc_next */
12752 dc->base.pc_next = dc->pc;
12755 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12757 DisasContext *dc = container_of(dcbase, DisasContext, base);
12759 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
12760 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
12763 static const TranslatorOps arm_translator_ops = {
12764 .init_disas_context = arm_tr_init_disas_context,
12765 .tb_start = arm_tr_tb_start,
12766 .insn_start = arm_tr_insn_start,
12767 .breakpoint_check = arm_tr_breakpoint_check,
12768 .translate_insn = arm_tr_translate_insn,
12769 .tb_stop = arm_tr_tb_stop,
12770 .disas_log = arm_tr_disas_log,
12773 static const TranslatorOps thumb_translator_ops = {
12774 .init_disas_context = arm_tr_init_disas_context,
12775 .tb_start = arm_tr_tb_start,
12776 .insn_start = arm_tr_insn_start,
12777 .breakpoint_check = arm_tr_breakpoint_check,
12778 .translate_insn = thumb_tr_translate_insn,
12779 .tb_stop = arm_tr_tb_stop,
12780 .disas_log = arm_tr_disas_log,
12783 /* generate intermediate code for basic block 'tb'. */
12784 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
12787 const TranslatorOps *ops = &arm_translator_ops;
12789 if (ARM_TBFLAG_THUMB(tb->flags)) {
12790 ops = &thumb_translator_ops;
12792 #ifdef TARGET_AARCH64
12793 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
12794 ops = &aarch64_translator_ops;
12798 translator_loop(ops, &dc.base, cpu, tb);
12801 static const char *cpu_mode_names[16] = {
12802 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12803 "???", "???", "hyp", "und", "???", "???", "???", "sys"
12806 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12809 ARMCPU *cpu = ARM_CPU(cs);
12810 CPUARMState *env = &cpu->env;
12814 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12818 for (i = 0; i < 16; i++) {
12819 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
12821 cpu_fprintf(f, "\n");
12823 cpu_fprintf(f, " ");
12826 if (arm_feature(env, ARM_FEATURE_M)) {
12827 uint32_t xpsr = xpsr_read(env);
12829 const char *ns_status = "";
12831 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12832 ns_status = env->v7m.secure ? "S " : "NS ";
12835 if (xpsr & XPSR_EXCP) {
12838 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
12839 mode = "unpriv-thread";
12841 mode = "priv-thread";
12845 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
12847 xpsr & XPSR_N ? 'N' : '-',
12848 xpsr & XPSR_Z ? 'Z' : '-',
12849 xpsr & XPSR_C ? 'C' : '-',
12850 xpsr & XPSR_V ? 'V' : '-',
12851 xpsr & XPSR_T ? 'T' : 'A',
12855 uint32_t psr = cpsr_read(env);
12856 const char *ns_status = "";
12858 if (arm_feature(env, ARM_FEATURE_EL3) &&
12859 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12860 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12863 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12865 psr & CPSR_N ? 'N' : '-',
12866 psr & CPSR_Z ? 'Z' : '-',
12867 psr & CPSR_C ? 'C' : '-',
12868 psr & CPSR_V ? 'V' : '-',
12869 psr & CPSR_T ? 'T' : 'A',
12871 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
12874 if (flags & CPU_DUMP_FPU) {
12875 int numvfpregs = 0;
12876 if (arm_feature(env, ARM_FEATURE_VFP)) {
12879 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12882 for (i = 0; i < numvfpregs; i++) {
12883 uint64_t v = *aa32_vfp_dreg(env, i);
12884 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12885 i * 2, (uint32_t)v,
12886 i * 2 + 1, (uint32_t)(v >> 32),
12889 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
12893 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12894 target_ulong *data)
12898 env->condexec_bits = 0;
12899 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12901 env->regs[15] = data[0];
12902 env->condexec_bits = data[1];
12903 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;