4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
28 #include "tcg-op-gvec.h"
30 #include "qemu/bitops.h"
32 #include "exec/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
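/* Illustrative use: a decoder can write "ARCH(6T2);" near the top of an
 * insn's handling; on cores without the Thumb-2 feature this expands to a
 * jump to the enclosing function's illegal_op label, making the insn UNDEF.
 */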
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
59 #define IS_USER(s) (s->user)
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 /* FIXME: These should be removed. */
70 static TCGv_i32 cpu_F0s, cpu_F1s;
71 static TCGv_i64 cpu_F0d, cpu_F1d;
73 #include "exec/gen-icount.h"
75 static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
79 /* Function prototypes for gen_ functions calling Neon helpers. */
80 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
83 /* Initialize TCG globals. */
84 void arm_translate_init(void)
88 for (i = 0; i < 16; i++) {
89 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
90 offsetof(CPUARMState, regs[i]),
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
103 a64_translate_init();
106 /* Flags for the disas_set_da_iss info argument:
107 * lower bits hold the Rt register number, higher bits are flags.
109 typedef enum ISSInfo {
112 ISSInvalid = (1 << 5),
113 ISSIsAcqRel = (1 << 6),
114 ISSIsWrite = (1 << 7),
115 ISSIs16Bit = (1 << 8),
118 /* Save the syndrome information for a Data Abort */
119 static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
122 int sas = memop & MO_SIZE;
123 bool sse = memop & MO_SIGN;
124 bool is_acqrel = issinfo & ISSIsAcqRel;
125 bool is_write = issinfo & ISSIsWrite;
126 bool is_16bit = issinfo & ISSIs16Bit;
127 int srt = issinfo & ISSRegMask;
129 if (issinfo & ISSInvalid) {
130 /* Some callsites want to conditionally provide ISS info,
131 * e.g. "only if this was not a writeback"
137 /* For AArch32, insns where the src/dest is R15 never generate
138 * ISS information. Catching that here saves checking at all
144 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
145 0, 0, 0, is_write, 0, is_16bit);
146 disas_set_insn_syndrome(s, syn);
149 static inline int get_a32_user_mem_index(DisasContext *s)
151 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
153 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
154 * otherwise, access as if at PL0.
156 switch (s->mmu_idx) {
157 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
158 case ARMMMUIdx_S12NSE0:
159 case ARMMMUIdx_S12NSE1:
160 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
162 case ARMMMUIdx_S1SE0:
163 case ARMMMUIdx_S1SE1:
164 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
165 case ARMMMUIdx_MUser:
166 case ARMMMUIdx_MPriv:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
168 case ARMMMUIdx_MUserNegPri:
169 case ARMMMUIdx_MPrivNegPri:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
171 case ARMMMUIdx_MSUser:
172 case ARMMMUIdx_MSPriv:
173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
174 case ARMMMUIdx_MSUserNegPri:
175 case ARMMMUIdx_MSPrivNegPri:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
179 g_assert_not_reached();
183 static inline TCGv_i32 load_cpu_offset(int offset)
185 TCGv_i32 tmp = tcg_temp_new_i32();
186 tcg_gen_ld_i32(tmp, cpu_env, offset);
190 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
192 static inline void store_cpu_offset(TCGv_i32 var, int offset)
194 tcg_gen_st_i32(var, cpu_env, offset);
195 tcg_temp_free_i32(var);
198 #define store_cpu_field(var, name) \
199 store_cpu_offset(var, offsetof(CPUARMState, name))
201 /* Set a variable to the value of a CPU register. */
202 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
206 /* normally, since we updated PC, we only need to add the length of one insn */
208 addr = (long)s->pc + 2;
210 addr = (long)s->pc + 4;
211 tcg_gen_movi_i32(var, addr);
213 tcg_gen_mov_i32(var, cpu_R[reg]);
217 /* Create a new temporary and set it to the value of a CPU register. */
218 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
220 TCGv_i32 tmp = tcg_temp_new_i32();
221 load_reg_var(s, tmp, reg);
225 /* Set a CPU register.  The source must be a temporary and will be marked as dead. */
227 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
230 /* In Thumb mode, we must ignore bit 0.
231 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
232 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
233 * We choose to ignore [1:0] in ARM mode for all architecture versions.
235 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
236 s->base.is_jmp = DISAS_JUMP;
238 tcg_gen_mov_i32(cpu_R[reg], var);
239 tcg_temp_free_i32(var);
242 /* Value extensions. */
243 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
244 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
245 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
246 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
248 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
249 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
252 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
254 TCGv_i32 tmp_mask = tcg_const_i32(mask);
255 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
256 tcg_temp_free_i32(tmp_mask);
258 /* Set NZCV flags from the high 4 bits of var. */
259 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
261 static void gen_exception_internal(int excp)
263 TCGv_i32 tcg_excp = tcg_const_i32(excp);
265 assert(excp_is_internal(excp));
266 gen_helper_exception_internal(cpu_env, tcg_excp);
267 tcg_temp_free_i32(tcg_excp);
270 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
272 TCGv_i32 tcg_excp = tcg_const_i32(excp);
273 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
274 TCGv_i32 tcg_el = tcg_const_i32(target_el);
276 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
279 tcg_temp_free_i32(tcg_el);
280 tcg_temp_free_i32(tcg_syn);
281 tcg_temp_free_i32(tcg_excp);
284 static void gen_ss_advance(DisasContext *s)
286 /* If the singlestep state is Active-not-pending, advance to Active-pending. */
291 gen_helper_clear_pstate_ss(cpu_env);
295 static void gen_step_complete_exception(DisasContext *s)
297 /* We just completed step of an insn. Move from Active-not-pending
298 * to Active-pending, and then also take the swstep exception.
299 * This corresponds to making the (IMPDEF) choice to prioritize
300 * swstep exceptions over asynchronous exceptions taken to an exception
301 * level where debug is disabled. This choice has the advantage that
302 * we do not need to maintain internal state corresponding to the
303 * ISV/EX syndrome bits between completion of the step and generation
304 * of the exception, and our syndrome information is always correct.
307 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
308 default_exception_el(s));
309 s->base.is_jmp = DISAS_NORETURN;
312 static void gen_singlestep_exception(DisasContext *s)
314 /* Generate the right kind of exception for singlestep, which is
315 * either the architectural singlestep or EXCP_DEBUG for QEMU's
316 * gdb singlestepping.
319 gen_step_complete_exception(s);
321 gen_exception_internal(EXCP_DEBUG);
325 static inline bool is_singlestepping(DisasContext *s)
327 /* Return true if we are singlestepping either because of
328 * architectural singlestep or QEMU gdbstub singlestep. This does
329 * not include the command line '-singlestep' mode which is rather
330 * misnamed as it only means "one instruction per TB" and doesn't
331 * affect the code we generate.
333 return s->base.singlestep_enabled || s->ss_active;
336 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
338 TCGv_i32 tmp1 = tcg_temp_new_i32();
339 TCGv_i32 tmp2 = tcg_temp_new_i32();
340 tcg_gen_ext16s_i32(tmp1, a);
341 tcg_gen_ext16s_i32(tmp2, b);
342 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
343 tcg_temp_free_i32(tmp2);
344 tcg_gen_sari_i32(a, a, 16);
345 tcg_gen_sari_i32(b, b, 16);
346 tcg_gen_mul_i32(b, b, a);
347 tcg_gen_mov_i32(a, tmp1);
348 tcg_temp_free_i32(tmp1);
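/* Note on gen_smul_dual above: on return, a holds the product of the two
 * sign-extended low halfwords and b holds the product of the two high
 * halfwords; both inputs are overwritten.
 */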
351 /* Byteswap each halfword. */
352 static void gen_rev16(TCGv_i32 var)
354 TCGv_i32 tmp = tcg_temp_new_i32();
355 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
356 tcg_gen_shri_i32(tmp, var, 8);
357 tcg_gen_and_i32(tmp, tmp, mask);
358 tcg_gen_and_i32(var, var, mask);
359 tcg_gen_shli_i32(var, var, 8);
360 tcg_gen_or_i32(var, var, tmp);
361 tcg_temp_free_i32(mask);
362 tcg_temp_free_i32(tmp);
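/* Worked example for gen_rev16 above: 0xAABBCCDD -> 0xBBAADDCC, i.e. the
 * bytes within each 16-bit half are swapped while the halves themselves
 * stay in place.
 */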
365 /* Byteswap low halfword and sign extend. */
366 static void gen_revsh(TCGv_i32 var)
368 tcg_gen_ext16u_i32(var, var);
369 tcg_gen_bswap16_i32(var, var);
370 tcg_gen_ext16s_i32(var, var);
373 /* Return (b << 32) + a. Mark inputs as dead. */
374 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
376 TCGv_i64 tmp64 = tcg_temp_new_i64();
378 tcg_gen_extu_i32_i64(tmp64, b);
379 tcg_temp_free_i32(b);
380 tcg_gen_shli_i64(tmp64, tmp64, 32);
381 tcg_gen_add_i64(a, tmp64, a);
383 tcg_temp_free_i64(tmp64);
387 /* Return (b << 32) - a. Mark inputs as dead. */
388 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
390 TCGv_i64 tmp64 = tcg_temp_new_i64();
392 tcg_gen_extu_i32_i64(tmp64, b);
393 tcg_temp_free_i32(b);
394 tcg_gen_shli_i64(tmp64, tmp64, 32);
395 tcg_gen_sub_i64(a, tmp64, a);
397 tcg_temp_free_i64(tmp64);
401 /* 32x32->64 multiply. Marks inputs as dead. */
402 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
404 TCGv_i32 lo = tcg_temp_new_i32();
405 TCGv_i32 hi = tcg_temp_new_i32();
408 tcg_gen_mulu2_i32(lo, hi, a, b);
409 tcg_temp_free_i32(a);
410 tcg_temp_free_i32(b);
412 ret = tcg_temp_new_i64();
413 tcg_gen_concat_i32_i64(ret, lo, hi);
414 tcg_temp_free_i32(lo);
415 tcg_temp_free_i32(hi);
420 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
422 TCGv_i32 lo = tcg_temp_new_i32();
423 TCGv_i32 hi = tcg_temp_new_i32();
426 tcg_gen_muls2_i32(lo, hi, a, b);
427 tcg_temp_free_i32(a);
428 tcg_temp_free_i32(b);
430 ret = tcg_temp_new_i64();
431 tcg_gen_concat_i32_i64(ret, lo, hi);
432 tcg_temp_free_i32(lo);
433 tcg_temp_free_i32(hi);
438 /* Swap low and high halfwords. */
439 static void gen_swap_half(TCGv_i32 var)
441 TCGv_i32 tmp = tcg_temp_new_i32();
442 tcg_gen_shri_i32(tmp, var, 16);
443 tcg_gen_shli_i32(var, var, 16);
444 tcg_gen_or_i32(var, var, tmp);
445 tcg_temp_free_i32(tmp);
448 /* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
449    tmp = (t0 ^ t1) & 0x8000;
450    t0 &= ~0x8000;
451    t1 &= ~0x8000;
452    t0 = (t0 + t1) ^ tmp;
453  */
455 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
457 TCGv_i32 tmp = tcg_temp_new_i32();
458 tcg_gen_xor_i32(tmp, t0, t1);
459 tcg_gen_andi_i32(tmp, tmp, 0x8000);
460 tcg_gen_andi_i32(t0, t0, ~0x8000);
461 tcg_gen_andi_i32(t1, t1, ~0x8000);
462 tcg_gen_add_i32(t0, t0, t1);
463 tcg_gen_xor_i32(t0, t0, tmp);
464 tcg_temp_free_i32(tmp);
465 tcg_temp_free_i32(t1);
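/* Why the masking in gen_add16 works: bit 15 of both operands is cleared
 * before the single 32-bit add, so no carry can cross from the low halfword
 * into the high one.  After the add, bit 15 holds only the carry out of bit
 * 14; XORing in tmp = (t0 ^ t1) & 0x8000 then restores the correct bit 15,
 * since at that position the sum (ignoring carry out) is just the XOR of
 * the two operand bits and the incoming carry.
 */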
468 /* Set CF to the top bit of var. */
469 static void gen_set_CF_bit31(TCGv_i32 var)
471 tcg_gen_shri_i32(cpu_CF, var, 31);
474 /* Set N and Z flags from var. */
475 static inline void gen_logic_CC(TCGv_i32 var)
477 tcg_gen_mov_i32(cpu_NF, var);
478 tcg_gen_mov_i32(cpu_ZF, var);
482 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
484 tcg_gen_add_i32(t0, t0, t1);
485 tcg_gen_add_i32(t0, t0, cpu_CF);
488 /* dest = T0 + T1 + CF. */
489 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
491 tcg_gen_add_i32(dest, t0, t1);
492 tcg_gen_add_i32(dest, dest, cpu_CF);
495 /* dest = T0 - T1 + CF - 1. */
496 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
498 tcg_gen_sub_i32(dest, t0, t1);
499 tcg_gen_add_i32(dest, dest, cpu_CF);
500 tcg_gen_subi_i32(dest, dest, 1);
503 /* dest = T0 + T1. Compute C, N, V and Z flags */
504 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
506 TCGv_i32 tmp = tcg_temp_new_i32();
507 tcg_gen_movi_i32(tmp, 0);
508 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
509 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
510 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
511 tcg_gen_xor_i32(tmp, t0, t1);
512 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
513 tcg_temp_free_i32(tmp);
514 tcg_gen_mov_i32(dest, cpu_NF);
517 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
518 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
520 TCGv_i32 tmp = tcg_temp_new_i32();
521 if (TCG_TARGET_HAS_add2_i32) {
522 tcg_gen_movi_i32(tmp, 0);
523 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
524 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
526 TCGv_i64 q0 = tcg_temp_new_i64();
527 TCGv_i64 q1 = tcg_temp_new_i64();
528 tcg_gen_extu_i32_i64(q0, t0);
529 tcg_gen_extu_i32_i64(q1, t1);
530 tcg_gen_add_i64(q0, q0, q1);
531 tcg_gen_extu_i32_i64(q1, cpu_CF);
532 tcg_gen_add_i64(q0, q0, q1);
533 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
534 tcg_temp_free_i64(q0);
535 tcg_temp_free_i64(q1);
537 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
538 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
539 tcg_gen_xor_i32(tmp, t0, t1);
540 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
541 tcg_temp_free_i32(tmp);
542 tcg_gen_mov_i32(dest, cpu_NF);
545 /* dest = T0 - T1. Compute C, N, V and Z flags */
546 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
549 tcg_gen_sub_i32(cpu_NF, t0, t1);
550 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
551 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
552 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
553 tmp = tcg_temp_new_i32();
554 tcg_gen_xor_i32(tmp, t0, t1);
555 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
556 tcg_temp_free_i32(tmp);
557 tcg_gen_mov_i32(dest, cpu_NF);
560 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
561 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
563 TCGv_i32 tmp = tcg_temp_new_i32();
564 tcg_gen_not_i32(tmp, t1);
565 gen_adc_CC(dest, t0, tmp);
566 tcg_temp_free_i32(tmp);
569 #define GEN_SHIFT(name) \
570 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
572 TCGv_i32 tmp1, tmp2, tmp3; \
573 tmp1 = tcg_temp_new_i32(); \
574 tcg_gen_andi_i32(tmp1, t1, 0xff); \
575 tmp2 = tcg_const_i32(0); \
576 tmp3 = tcg_const_i32(0x1f); \
577 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
578 tcg_temp_free_i32(tmp3); \
579 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
580 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
581 tcg_temp_free_i32(tmp2); \
582 tcg_temp_free_i32(tmp1); \
588 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
591 tmp1 = tcg_temp_new_i32();
592 tcg_gen_andi_i32(tmp1, t1, 0xff);
593 tmp2 = tcg_const_i32(0x1f);
594 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
595 tcg_temp_free_i32(tmp2);
596 tcg_gen_sar_i32(dest, t0, tmp1);
597 tcg_temp_free_i32(tmp1);
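/* The GEN_SHIFT-generated helpers and gen_sar above implement the ARM
 * shift-by-register semantics: only the bottom byte of the shift register
 * is significant; for LSL/LSR a count of 32..255 produces zero, while for
 * ASR a count of 32..255 is clamped to 31 so the result is all sign bits.
 */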
600 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
602 TCGv_i32 c0 = tcg_const_i32(0);
603 TCGv_i32 tmp = tcg_temp_new_i32();
604 tcg_gen_neg_i32(tmp, src);
605 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
606 tcg_temp_free_i32(c0);
607 tcg_temp_free_i32(tmp);
610 static void shifter_out_im(TCGv_i32 var, int shift)
613 tcg_gen_andi_i32(cpu_CF, var, 1);
615 tcg_gen_shri_i32(cpu_CF, var, shift);
617 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
622 /* Shift by immediate. Includes special handling for shift == 0. */
623 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
624 int shift, int flags)
630 shifter_out_im(var, 32 - shift);
631 tcg_gen_shli_i32(var, var, shift);
637 tcg_gen_shri_i32(cpu_CF, var, 31);
639 tcg_gen_movi_i32(var, 0);
642 shifter_out_im(var, shift - 1);
643 tcg_gen_shri_i32(var, var, shift);
650 shifter_out_im(var, shift - 1);
653 tcg_gen_sari_i32(var, var, shift);
655 case 3: /* ROR/RRX */
658 shifter_out_im(var, shift - 1);
659 tcg_gen_rotri_i32(var, var, shift); break;
661 TCGv_i32 tmp = tcg_temp_new_i32();
662 tcg_gen_shli_i32(tmp, cpu_CF, 31);
664 shifter_out_im(var, 0);
665 tcg_gen_shri_i32(var, var, 1);
666 tcg_gen_or_i32(var, var, tmp);
667 tcg_temp_free_i32(tmp);
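/* Reminder of the A32 immediate-shift encoding handled by gen_arm_shift_im:
 * LSL #0 is the identity, an encoded shift of 0 for LSR/ASR means a shift
 * by 32, and ROR #0 is really RRX (rotate right by one through the carry),
 * which is why shift == 0 needs the special cases above.
 */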
672 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
673 TCGv_i32 shift, int flags)
677 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
678 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
679 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
680 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
685 gen_shl(var, var, shift);
688 gen_shr(var, var, shift);
691 gen_sar(var, var, shift);
693 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
694 tcg_gen_rotr_i32(var, var, shift); break;
697 tcg_temp_free_i32(shift);
700 #define PAS_OP(pfx) \
702 case 0: gen_pas_helper(glue(pfx,add16)); break; \
703 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
704 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
705 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
706 case 4: gen_pas_helper(glue(pfx,add8)); break; \
707 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
709 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
714 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
716 tmp = tcg_temp_new_ptr();
717 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
719 tcg_temp_free_ptr(tmp);
722 tmp = tcg_temp_new_ptr();
723 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
725 tcg_temp_free_ptr(tmp);
727 #undef gen_pas_helper
728 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
741 #undef gen_pas_helper
746 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
747 #define PAS_OP(pfx) \
749 case 0: gen_pas_helper(glue(pfx,add8)); break; \
750 case 1: gen_pas_helper(glue(pfx,add16)); break; \
751 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
752 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
753 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
754 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
756 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
761 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
763 tmp = tcg_temp_new_ptr();
764 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
766 tcg_temp_free_ptr(tmp);
769 tmp = tcg_temp_new_ptr();
770 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
772 tcg_temp_free_ptr(tmp);
774 #undef gen_pas_helper
775 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
788 #undef gen_pas_helper
794 * Generate a conditional based on ARM condition code cc.
795 * This is common between ARM and AArch64 targets.
797 void arm_test_cc(DisasCompare *cmp, int cc)
828 case 8: /* hi: C && !Z */
829 case 9: /* ls: !C || Z -> !(C && !Z) */
831 value = tcg_temp_new_i32();
833 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
834 ZF is non-zero for !Z; so AND the two subexpressions. */
835 tcg_gen_neg_i32(value, cpu_CF);
836 tcg_gen_and_i32(value, value, cpu_ZF);
839 case 10: /* ge: N == V -> N ^ V == 0 */
840 case 11: /* lt: N != V -> N ^ V != 0 */
841 /* Since we're only interested in the sign bit, == 0 is >= 0. */
843 value = tcg_temp_new_i32();
845 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
848 case 12: /* gt: !Z && N == V */
849 case 13: /* le: Z || N != V */
851 value = tcg_temp_new_i32();
853 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
854 * the sign bit then AND with ZF to yield the result. */
855 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
856 tcg_gen_sari_i32(value, value, 31);
857 tcg_gen_andc_i32(value, cpu_ZF, value);
860 case 14: /* always */
861 case 15: /* always */
862 /* Use the ALWAYS condition, which will fold early.
863 * It doesn't matter what we use for the value. */
864 cond = TCG_COND_ALWAYS;
869 fprintf(stderr, "Bad condition code 0x%x\n", cc);
874 cond = tcg_invert_cond(cond);
880 cmp->value_global = global;
883 void arm_free_cc(DisasCompare *cmp)
885 if (!cmp->value_global) {
886 tcg_temp_free_i32(cmp->value);
890 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
892 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
895 void arm_gen_test_cc(int cc, TCGLabel *label)
898 arm_test_cc(&cmp, cc);
899 arm_jump_cc(&cmp, label);
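/* Typical (illustrative) use of arm_gen_test_cc: a decoder can branch to a
 * "skip" label using the inverted condition, e.g. arm_gen_test_cc(cc ^ 1,
 * skip_label), so that the conditional insn's body is only executed when
 * condition cc holds.
 */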
903 static const uint8_t table_logic_cc[16] = {
922 static inline void gen_set_condexec(DisasContext *s)
924 if (s->condexec_mask) {
925 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
926 TCGv_i32 tmp = tcg_temp_new_i32();
927 tcg_gen_movi_i32(tmp, val);
928 store_cpu_field(tmp, condexec_bits);
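/* Note on gen_set_condexec above: it rebuilds the condexec_bits field (the
 * Thumb IT-state) from the decoder's split condexec_cond/condexec_mask
 * copies, so the CPU state is up to date before an exception is taken.
 */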
932 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
934 tcg_gen_movi_i32(cpu_R[15], val);
937 /* Set PC and Thumb state from an immediate address. */
938 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
942 s->base.is_jmp = DISAS_JUMP;
943 if (s->thumb != (addr & 1)) {
944 tmp = tcg_temp_new_i32();
945 tcg_gen_movi_i32(tmp, addr & 1);
946 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
947 tcg_temp_free_i32(tmp);
949 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
952 /* Set PC and Thumb state from var. var is marked as dead. */
953 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
955 s->base.is_jmp = DISAS_JUMP;
956 tcg_gen_andi_i32(cpu_R[15], var, ~1);
957 tcg_gen_andi_i32(var, var, 1);
958 store_cpu_field(var, thumb);
961 /* Set PC and Thumb state from var. var is marked as dead.
962 * For M-profile CPUs, include logic to detect exception-return
963 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
964 * and BX reg, and no others, and happens only for code in Handler mode.
966 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
968 /* Generate the same code here as for a simple bx, but flag via
969 * s->base.is_jmp that we need to do the rest of the work later.
972 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
973 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
974 s->base.is_jmp = DISAS_BX_EXCRET;
978 static inline void gen_bx_excret_final_code(DisasContext *s)
980 /* Generate the code to finish possible exception return and end the TB */
981 TCGLabel *excret_label = gen_new_label();
984 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
985 /* Covers FNC_RETURN and EXC_RETURN magic */
986 min_magic = FNC_RETURN_MIN_MAGIC;
988 /* EXC_RETURN magic only */
989 min_magic = EXC_RETURN_MIN_MAGIC;
992 /* Is the new PC value in the magic range indicating exception return? */
993 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
994 /* No: end the TB as we would for a DISAS_JMP */
995 if (is_singlestepping(s)) {
996 gen_singlestep_exception(s);
998 tcg_gen_exit_tb(NULL, 0);
1000 gen_set_label(excret_label);
1001 /* Yes: this is an exception return.
1002 * At this point in runtime env->regs[15] and env->thumb will hold
1003 * the exception-return magic number, which do_v7m_exception_exit()
1004 * will read. Nothing else will be able to see those values because
1005 * the cpu-exec main loop guarantees that we will always go straight
1006 * from raising the exception to the exception-handling code.
1008 * gen_ss_advance(s) does nothing on M profile currently but
1009 * calling it is conceptually the right thing as we have executed
1010 * this instruction (compare SWI, HVC, SMC handling).
1013 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1016 static inline void gen_bxns(DisasContext *s, int rm)
1018 TCGv_i32 var = load_reg(s, rm);
1020 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1021 * we need to sync state before calling it, but:
1022 * - we don't need to do gen_set_pc_im() because the bxns helper will
1023 * always set the PC itself
1024 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1025 * unless it's outside an IT block or the last insn in an IT block,
1026 * so we know that condexec == 0 (already set at the top of the TB)
1027 * is correct in the non-UNPREDICTABLE cases, and we can choose
1028 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1030 gen_helper_v7m_bxns(cpu_env, var);
1031 tcg_temp_free_i32(var);
1032 s->base.is_jmp = DISAS_EXIT;
1035 static inline void gen_blxns(DisasContext *s, int rm)
1037 TCGv_i32 var = load_reg(s, rm);
1039 /* We don't need to sync condexec state, for the same reason as bxns.
1040 * We do however need to set the PC, because the blxns helper reads it.
1041 * The blxns helper may throw an exception.
1043 gen_set_pc_im(s, s->pc);
1044 gen_helper_v7m_blxns(cpu_env, var);
1045 tcg_temp_free_i32(var);
1046 s->base.is_jmp = DISAS_EXIT;
1049 /* Variant of store_reg which uses branch&exchange logic when storing
1050 to r15 in ARM architecture v7 and above. The source must be a temporary
1051 and will be marked as dead. */
1052 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1054 if (reg == 15 && ENABLE_ARCH_7) {
1057 store_reg(s, reg, var);
1061 /* Variant of store_reg which uses branch&exchange logic when storing
1062 * to r15 in ARM architecture v5T and above. This is used for storing
1063 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1064 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1065 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1067 if (reg == 15 && ENABLE_ARCH_5) {
1068 gen_bx_excret(s, var);
1070 store_reg(s, reg, var);
1074 #ifdef CONFIG_USER_ONLY
1075 #define IS_USER_ONLY 1
1077 #define IS_USER_ONLY 0
1080 /* Abstractions of "generate code to do a guest load/store for
1081 * AArch32", where a vaddr is always 32 bits (and is zero
1082 * extended if we're a 64 bit core) and data is also
1083 * 32 bits unless specifically doing a 64 bit access.
1084 * These functions work like tcg_gen_qemu_{ld,st}* except
1085 * that the address argument is TCGv_i32 rather than TCGv.
1088 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1090 TCGv addr = tcg_temp_new();
1091 tcg_gen_extu_i32_tl(addr, a32);
1093 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1094 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1095 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
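/* The XOR above implements the legacy BE32 byte-lane swap: with SCTLR.B set,
 * a 16-bit access has its address XORed with 2 and a byte access with 3
 * (i.e. 4 - (1 << size)), while 32-bit and larger accesses are unchanged.
 */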
1100 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1101 int index, TCGMemOp opc)
1105 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1106 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1110 addr = gen_aa32_addr(s, a32, opc);
1111 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1112 tcg_temp_free(addr);
1115 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1116 int index, TCGMemOp opc)
1120 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1121 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1125 addr = gen_aa32_addr(s, a32, opc);
1126 tcg_gen_qemu_st_i32(val, addr, index, opc);
1127 tcg_temp_free(addr);
1130 #define DO_GEN_LD(SUFF, OPC) \
1131 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1132 TCGv_i32 a32, int index) \
1134 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1136 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1138 TCGv_i32 a32, int index, \
1141 gen_aa32_ld##SUFF(s, val, a32, index); \
1142 disas_set_da_iss(s, OPC, issinfo); \
1145 #define DO_GEN_ST(SUFF, OPC) \
1146 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1147 TCGv_i32 a32, int index) \
1149 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1151 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1153 TCGv_i32 a32, int index, \
1156 gen_aa32_st##SUFF(s, val, a32, index); \
1157 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1160 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1162 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1163 if (!IS_USER_ONLY && s->sctlr_b) {
1164 tcg_gen_rotri_i64(val, val, 32);
1168 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1169 int index, TCGMemOp opc)
1171 TCGv addr = gen_aa32_addr(s, a32, opc);
1172 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1173 gen_aa32_frob64(s, val);
1174 tcg_temp_free(addr);
1177 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1178 TCGv_i32 a32, int index)
1180 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1183 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1184 int index, TCGMemOp opc)
1186 TCGv addr = gen_aa32_addr(s, a32, opc);
1188 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1189 if (!IS_USER_ONLY && s->sctlr_b) {
1190 TCGv_i64 tmp = tcg_temp_new_i64();
1191 tcg_gen_rotri_i64(tmp, val, 32);
1192 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1193 tcg_temp_free_i64(tmp);
1195 tcg_gen_qemu_st_i64(val, addr, index, opc);
1197 tcg_temp_free(addr);
1200 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1201 TCGv_i32 a32, int index)
1203 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1206 DO_GEN_LD(8s, MO_SB)
1207 DO_GEN_LD(8u, MO_UB)
1208 DO_GEN_LD(16s, MO_SW)
1209 DO_GEN_LD(16u, MO_UW)
1210 DO_GEN_LD(32u, MO_UL)
1212 DO_GEN_ST(16, MO_UW)
1213 DO_GEN_ST(32, MO_UL)
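/* For reference, DO_GEN_LD(8u, MO_UB) above expands to:
 *
 *   static inline void gen_aa32_ld8u(DisasContext *s, TCGv_i32 val,
 *                                    TCGv_i32 a32, int index)
 *   {
 *       gen_aa32_ld_i32(s, val, a32, index, MO_UB | s->be_data);
 *   }
 *
 * plus a gen_aa32_ld8u_iss() variant that additionally records the ISS
 * syndrome information via disas_set_da_iss().
 */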
1215 static inline void gen_hvc(DisasContext *s, int imm16)
1217 /* The pre HVC helper handles cases when HVC gets trapped
1218 * as an undefined insn by runtime configuration (i.e. before
1219 * the insn really executes).
1221 gen_set_pc_im(s, s->pc - 4);
1222 gen_helper_pre_hvc(cpu_env);
1223 /* Otherwise we will treat this as a real exception which
1224 * happens after execution of the insn. (The distinction matters
1225 * for the PC value reported to the exception handler and also
1226 * for single stepping.)
1229 gen_set_pc_im(s, s->pc);
1230 s->base.is_jmp = DISAS_HVC;
1233 static inline void gen_smc(DisasContext *s)
1235 /* As with HVC, we may take an exception either before or after
1236 * the insn executes.
1240 gen_set_pc_im(s, s->pc - 4);
1241 tmp = tcg_const_i32(syn_aa32_smc());
1242 gen_helper_pre_smc(cpu_env, tmp);
1243 tcg_temp_free_i32(tmp);
1244 gen_set_pc_im(s, s->pc);
1245 s->base.is_jmp = DISAS_SMC;
1248 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1250 gen_set_condexec(s);
1251 gen_set_pc_im(s, s->pc - offset);
1252 gen_exception_internal(excp);
1253 s->base.is_jmp = DISAS_NORETURN;
1256 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1257 int syn, uint32_t target_el)
1259 gen_set_condexec(s);
1260 gen_set_pc_im(s, s->pc - offset);
1261 gen_exception(excp, syn, target_el);
1262 s->base.is_jmp = DISAS_NORETURN;
1265 static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
1269 gen_set_condexec(s);
1270 gen_set_pc_im(s, s->pc - offset);
1271 tcg_syn = tcg_const_i32(syn);
1272 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1273 tcg_temp_free_i32(tcg_syn);
1274 s->base.is_jmp = DISAS_NORETURN;
1277 /* Force a TB lookup after an instruction that changes the CPU state. */
1278 static inline void gen_lookup_tb(DisasContext *s)
1280 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1281 s->base.is_jmp = DISAS_EXIT;
1284 static inline void gen_hlt(DisasContext *s, int imm)
1286 /* HLT. This has two purposes.
1287 * Architecturally, it is an external halting debug instruction.
1288 * Since QEMU doesn't implement external debug, we treat it as
1289 * the architecture requires when halting debug is disabled: it will UNDEF.
1290 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1291 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1292 * must trigger semihosting even for ARMv7 and earlier, where
1293 * HLT was an undefined encoding.
1294 * In system mode, we don't allow userspace access to
1295 * semihosting, to provide some semblance of security
1296 * (and for consistency with our 32-bit semihosting).
1298 if (semihosting_enabled() &&
1299 #ifndef CONFIG_USER_ONLY
1300 s->current_el != 0 &&
1302 (imm == (s->thumb ? 0x3c : 0xf000))) {
1303 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1307 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1308 default_exception_el(s));
1311 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1314 int val, rm, shift, shiftop;
1317 if (!(insn & (1 << 25))) {
1320 if (!(insn & (1 << 23)))
1323 tcg_gen_addi_i32(var, var, val);
1325 /* shift/register */
1327 shift = (insn >> 7) & 0x1f;
1328 shiftop = (insn >> 5) & 3;
1329 offset = load_reg(s, rm);
1330 gen_arm_shift_im(offset, shiftop, shift, 0);
1331 if (!(insn & (1 << 23)))
1332 tcg_gen_sub_i32(var, var, offset);
1334 tcg_gen_add_i32(var, var, offset);
1335 tcg_temp_free_i32(offset);
1339 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1340 int extra, TCGv_i32 var)
1345 if (insn & (1 << 22)) {
1347 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1348 if (!(insn & (1 << 23)))
1352 tcg_gen_addi_i32(var, var, val);
1356 tcg_gen_addi_i32(var, var, extra);
1358 offset = load_reg(s, rm);
1359 if (!(insn & (1 << 23)))
1360 tcg_gen_sub_i32(var, var, offset);
1362 tcg_gen_add_i32(var, var, offset);
1363 tcg_temp_free_i32(offset);
1367 static TCGv_ptr get_fpstatus_ptr(int neon)
1369 TCGv_ptr statusptr = tcg_temp_new_ptr();
1372 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1374 offset = offsetof(CPUARMState, vfp.fp_status);
1376 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
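/* get_fpstatus_ptr(neon != 0) returns a pointer to vfp.standard_fp_status,
 * the "standard FPSCR value" float_status used for Neon arithmetic;
 * get_fpstatus_ptr(0) returns the ordinary FPSCR-controlled vfp.fp_status.
 * The caller must free the returned TCGv_ptr.
 */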
1380 #define VFP_OP2(name) \
1381 static inline void gen_vfp_##name(int dp) \
1383 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1385 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1387 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1389 tcg_temp_free_ptr(fpst); \
1399 static inline void gen_vfp_F1_mul(int dp)
1401 /* Like gen_vfp_mul() but puts the result in F1 */
1402 TCGv_ptr fpst = get_fpstatus_ptr(0);
1404 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1406 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1408 tcg_temp_free_ptr(fpst);
1411 static inline void gen_vfp_F1_neg(int dp)
1413 /* Like gen_vfp_neg() but puts the result in F1 */
1415 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1417 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1421 static inline void gen_vfp_abs(int dp)
1424 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1426 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1429 static inline void gen_vfp_neg(int dp)
1432 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1434 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1437 static inline void gen_vfp_sqrt(int dp)
1440 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1442 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1445 static inline void gen_vfp_cmp(int dp)
1448 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1450 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1453 static inline void gen_vfp_cmpe(int dp)
1456 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1458 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1461 static inline void gen_vfp_F1_ld0(int dp)
1464 tcg_gen_movi_i64(cpu_F1d, 0);
1466 tcg_gen_movi_i32(cpu_F1s, 0);
1469 #define VFP_GEN_ITOF(name) \
1470 static inline void gen_vfp_##name(int dp, int neon) \
1472 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1474 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1476 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1478 tcg_temp_free_ptr(statusptr); \
1485 #define VFP_GEN_FTOI(name) \
1486 static inline void gen_vfp_##name(int dp, int neon) \
1488 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1490 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1492 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1494 tcg_temp_free_ptr(statusptr); \
1503 #define VFP_GEN_FIX(name, round) \
1504 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1506 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1507 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1509 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1512 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1515 tcg_temp_free_i32(tmp_shift); \
1516 tcg_temp_free_ptr(statusptr); \
1518 VFP_GEN_FIX(tosh, _round_to_zero)
1519 VFP_GEN_FIX(tosl, _round_to_zero)
1520 VFP_GEN_FIX(touh, _round_to_zero)
1521 VFP_GEN_FIX(toul, _round_to_zero)
1528 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1531 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1533 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1537 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1540 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1542 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1546 static inline long vfp_reg_offset(bool dp, unsigned reg)
1549 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1551 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1553 ofs += offsetof(CPU_DoubleU, l.upper);
1555 ofs += offsetof(CPU_DoubleU, l.lower);
1561 /* Return the offset of a 32-bit piece of a NEON register.
1562 zero is the least significant end of the register. */
1564 neon_reg_offset (int reg, int n)
1568 return vfp_reg_offset(0, sreg);
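/* Register file layout assumed above: with the zregs[] union, D register
 * d<n> lives in zregs[n >> 1].d[n & 1], and each 64-bit element packs two
 * S registers, with the bottom bit of the S register number selecting the
 * low or high half (host-endian dependent).
 */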
1571 static TCGv_i32 neon_load_reg(int reg, int pass)
1573 TCGv_i32 tmp = tcg_temp_new_i32();
1574 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1578 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1580 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1581 tcg_temp_free_i32(var);
1584 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1586 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1589 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1591 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1594 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1596 TCGv_ptr ret = tcg_temp_new_ptr();
1597 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1601 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1602 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1603 #define tcg_gen_st_f32 tcg_gen_st_i32
1604 #define tcg_gen_st_f64 tcg_gen_st_i64
1606 static inline void gen_mov_F0_vreg(int dp, int reg)
1609 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1611 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1614 static inline void gen_mov_F1_vreg(int dp, int reg)
1617 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1619 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1622 static inline void gen_mov_vreg_F0(int dp, int reg)
1625 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1627 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1630 #define ARM_CP_RW_BIT (1 << 20)
1632 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1634 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1637 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1639 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1642 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1644 TCGv_i32 var = tcg_temp_new_i32();
1645 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1649 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1651 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1652 tcg_temp_free_i32(var);
1655 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1657 iwmmxt_store_reg(cpu_M0, rn);
1660 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1662 iwmmxt_load_reg(cpu_M0, rn);
1665 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1667 iwmmxt_load_reg(cpu_V1, rn);
1668 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1671 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1673 iwmmxt_load_reg(cpu_V1, rn);
1674 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1677 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1679 iwmmxt_load_reg(cpu_V1, rn);
1680 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1683 #define IWMMXT_OP(name) \
1684 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1686 iwmmxt_load_reg(cpu_V1, rn); \
1687 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1690 #define IWMMXT_OP_ENV(name) \
1691 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1693 iwmmxt_load_reg(cpu_V1, rn); \
1694 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1697 #define IWMMXT_OP_ENV_SIZE(name) \
1698 IWMMXT_OP_ENV(name##b) \
1699 IWMMXT_OP_ENV(name##w) \
1700 IWMMXT_OP_ENV(name##l)
1702 #define IWMMXT_OP_ENV1(name) \
1703 static inline void gen_op_iwmmxt_##name##_M0(void) \
1705 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1719 IWMMXT_OP_ENV_SIZE(unpackl)
1720 IWMMXT_OP_ENV_SIZE(unpackh)
1722 IWMMXT_OP_ENV1(unpacklub)
1723 IWMMXT_OP_ENV1(unpackluw)
1724 IWMMXT_OP_ENV1(unpacklul)
1725 IWMMXT_OP_ENV1(unpackhub)
1726 IWMMXT_OP_ENV1(unpackhuw)
1727 IWMMXT_OP_ENV1(unpackhul)
1728 IWMMXT_OP_ENV1(unpacklsb)
1729 IWMMXT_OP_ENV1(unpacklsw)
1730 IWMMXT_OP_ENV1(unpacklsl)
1731 IWMMXT_OP_ENV1(unpackhsb)
1732 IWMMXT_OP_ENV1(unpackhsw)
1733 IWMMXT_OP_ENV1(unpackhsl)
1735 IWMMXT_OP_ENV_SIZE(cmpeq)
1736 IWMMXT_OP_ENV_SIZE(cmpgtu)
1737 IWMMXT_OP_ENV_SIZE(cmpgts)
1739 IWMMXT_OP_ENV_SIZE(mins)
1740 IWMMXT_OP_ENV_SIZE(minu)
1741 IWMMXT_OP_ENV_SIZE(maxs)
1742 IWMMXT_OP_ENV_SIZE(maxu)
1744 IWMMXT_OP_ENV_SIZE(subn)
1745 IWMMXT_OP_ENV_SIZE(addn)
1746 IWMMXT_OP_ENV_SIZE(subu)
1747 IWMMXT_OP_ENV_SIZE(addu)
1748 IWMMXT_OP_ENV_SIZE(subs)
1749 IWMMXT_OP_ENV_SIZE(adds)
1751 IWMMXT_OP_ENV(avgb0)
1752 IWMMXT_OP_ENV(avgb1)
1753 IWMMXT_OP_ENV(avgw0)
1754 IWMMXT_OP_ENV(avgw1)
1756 IWMMXT_OP_ENV(packuw)
1757 IWMMXT_OP_ENV(packul)
1758 IWMMXT_OP_ENV(packuq)
1759 IWMMXT_OP_ENV(packsw)
1760 IWMMXT_OP_ENV(packsl)
1761 IWMMXT_OP_ENV(packsq)
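/* Example expansion: IWMMXT_OP_ENV(packsq) above defines
 *
 *   static inline void gen_op_iwmmxt_packsq_M0_wRn(int rn)
 *   {
 *       iwmmxt_load_reg(cpu_V1, rn);
 *       gen_helper_iwmmxt_packsq(cpu_M0, cpu_env, cpu_M0, cpu_V1);
 *   }
 *
 * i.e. every generated op combines wRn into the cached M0 accumulator.
 */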
1763 static void gen_op_iwmmxt_set_mup(void)
1766 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1767 tcg_gen_ori_i32(tmp, tmp, 2);
1768 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1771 static void gen_op_iwmmxt_set_cup(void)
1774 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1775 tcg_gen_ori_i32(tmp, tmp, 1);
1776 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
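/* gen_op_iwmmxt_set_mup()/set_cup() set bits 1 and 0 respectively of the
 * wCon control register, recording (per the iwMMXt MUp/CUp convention) that
 * a main (wRn) or control (wCx) register has been updated.
 */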
1779 static void gen_op_iwmmxt_setpsr_nz(void)
1781 TCGv_i32 tmp = tcg_temp_new_i32();
1782 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1783 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1786 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1788 iwmmxt_load_reg(cpu_V1, rn);
1789 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1790 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1793 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1800 rd = (insn >> 16) & 0xf;
1801 tmp = load_reg(s, rd);
1803 offset = (insn & 0xff) << ((insn >> 7) & 2);
1804 if (insn & (1 << 24)) {
1806 if (insn & (1 << 23))
1807 tcg_gen_addi_i32(tmp, tmp, offset);
1809 tcg_gen_addi_i32(tmp, tmp, -offset);
1810 tcg_gen_mov_i32(dest, tmp);
1811 if (insn & (1 << 21))
1812 store_reg(s, rd, tmp);
1814 tcg_temp_free_i32(tmp);
1815 } else if (insn & (1 << 21)) {
1817 tcg_gen_mov_i32(dest, tmp);
1818 if (insn & (1 << 23))
1819 tcg_gen_addi_i32(tmp, tmp, offset);
1821 tcg_gen_addi_i32(tmp, tmp, -offset);
1822 store_reg(s, rd, tmp);
1823 } else if (!(insn & (1 << 23)))
1828 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1830 int rd = (insn >> 0) & 0xf;
1833 if (insn & (1 << 8)) {
1834 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1837 tmp = iwmmxt_load_creg(rd);
1840 tmp = tcg_temp_new_i32();
1841 iwmmxt_load_reg(cpu_V0, rd);
1842 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1844 tcg_gen_andi_i32(tmp, tmp, mask);
1845 tcg_gen_mov_i32(dest, tmp);
1846 tcg_temp_free_i32(tmp);
1850 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1851 (i.e. an undefined instruction).  */
1852 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1855 int rdhi, rdlo, rd0, rd1, i;
1857 TCGv_i32 tmp, tmp2, tmp3;
1859 if ((insn & 0x0e000e00) == 0x0c000000) {
1860 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1862 rdlo = (insn >> 12) & 0xf;
1863 rdhi = (insn >> 16) & 0xf;
1864 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1865 iwmmxt_load_reg(cpu_V0, wrd);
1866 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1867 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1868 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1869 } else { /* TMCRR */
1870 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1871 iwmmxt_store_reg(cpu_V0, wrd);
1872 gen_op_iwmmxt_set_mup();
1877 wrd = (insn >> 12) & 0xf;
1878 addr = tcg_temp_new_i32();
1879 if (gen_iwmmxt_address(s, insn, addr)) {
1880 tcg_temp_free_i32(addr);
1883 if (insn & ARM_CP_RW_BIT) {
1884 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1885 tmp = tcg_temp_new_i32();
1886 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1887 iwmmxt_store_creg(wrd, tmp);
1890 if (insn & (1 << 8)) {
1891 if (insn & (1 << 22)) { /* WLDRD */
1892 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1894 } else { /* WLDRW wRd */
1895 tmp = tcg_temp_new_i32();
1896 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1899 tmp = tcg_temp_new_i32();
1900 if (insn & (1 << 22)) { /* WLDRH */
1901 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1902 } else { /* WLDRB */
1903 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1907 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1908 tcg_temp_free_i32(tmp);
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1913 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1914 tmp = iwmmxt_load_creg(wrd);
1915 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1917 gen_op_iwmmxt_movq_M0_wRn(wrd);
1918 tmp = tcg_temp_new_i32();
1919 if (insn & (1 << 8)) {
1920 if (insn & (1 << 22)) { /* WSTRD */
1921 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1922 } else { /* WSTRW wRd */
1923 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1924 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1927 if (insn & (1 << 22)) { /* WSTRH */
1928 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1929 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1930 } else { /* WSTRB */
1931 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1932 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1936 tcg_temp_free_i32(tmp);
1938 tcg_temp_free_i32(addr);
1942 if ((insn & 0x0f000000) != 0x0e000000)
1945 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1946 case 0x000: /* WOR */
1947 wrd = (insn >> 12) & 0xf;
1948 rd0 = (insn >> 0) & 0xf;
1949 rd1 = (insn >> 16) & 0xf;
1950 gen_op_iwmmxt_movq_M0_wRn(rd0);
1951 gen_op_iwmmxt_orq_M0_wRn(rd1);
1952 gen_op_iwmmxt_setpsr_nz();
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 gen_op_iwmmxt_set_cup();
1957 case 0x011: /* TMCR */
1960 rd = (insn >> 12) & 0xf;
1961 wrd = (insn >> 16) & 0xf;
1963 case ARM_IWMMXT_wCID:
1964 case ARM_IWMMXT_wCASF:
1966 case ARM_IWMMXT_wCon:
1967 gen_op_iwmmxt_set_cup();
1969 case ARM_IWMMXT_wCSSF:
1970 tmp = iwmmxt_load_creg(wrd);
1971 tmp2 = load_reg(s, rd);
1972 tcg_gen_andc_i32(tmp, tmp, tmp2);
1973 tcg_temp_free_i32(tmp2);
1974 iwmmxt_store_creg(wrd, tmp);
1976 case ARM_IWMMXT_wCGR0:
1977 case ARM_IWMMXT_wCGR1:
1978 case ARM_IWMMXT_wCGR2:
1979 case ARM_IWMMXT_wCGR3:
1980 gen_op_iwmmxt_set_cup();
1981 tmp = load_reg(s, rd);
1982 iwmmxt_store_creg(wrd, tmp);
1988 case 0x100: /* WXOR */
1989 wrd = (insn >> 12) & 0xf;
1990 rd0 = (insn >> 0) & 0xf;
1991 rd1 = (insn >> 16) & 0xf;
1992 gen_op_iwmmxt_movq_M0_wRn(rd0);
1993 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1994 gen_op_iwmmxt_setpsr_nz();
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1999 case 0x111: /* TMRC */
2002 rd = (insn >> 12) & 0xf;
2003 wrd = (insn >> 16) & 0xf;
2004 tmp = iwmmxt_load_creg(wrd);
2005 store_reg(s, rd, tmp);
2007 case 0x300: /* WANDN */
2008 wrd = (insn >> 12) & 0xf;
2009 rd0 = (insn >> 0) & 0xf;
2010 rd1 = (insn >> 16) & 0xf;
2011 gen_op_iwmmxt_movq_M0_wRn(rd0);
2012 tcg_gen_neg_i64(cpu_M0, cpu_M0);
2013 gen_op_iwmmxt_andq_M0_wRn(rd1);
2014 gen_op_iwmmxt_setpsr_nz();
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2019 case 0x200: /* WAND */
2020 wrd = (insn >> 12) & 0xf;
2021 rd0 = (insn >> 0) & 0xf;
2022 rd1 = (insn >> 16) & 0xf;
2023 gen_op_iwmmxt_movq_M0_wRn(rd0);
2024 gen_op_iwmmxt_andq_M0_wRn(rd1);
2025 gen_op_iwmmxt_setpsr_nz();
2026 gen_op_iwmmxt_movq_wRn_M0(wrd);
2027 gen_op_iwmmxt_set_mup();
2028 gen_op_iwmmxt_set_cup();
2030 case 0x810: case 0xa10: /* WMADD */
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 0) & 0xf;
2033 rd1 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2038 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2039 gen_op_iwmmxt_movq_wRn_M0(wrd);
2040 gen_op_iwmmxt_set_mup();
2042 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2043 wrd = (insn >> 12) & 0xf;
2044 rd0 = (insn >> 16) & 0xf;
2045 rd1 = (insn >> 0) & 0xf;
2046 gen_op_iwmmxt_movq_M0_wRn(rd0);
2047 switch ((insn >> 22) & 3) {
2049 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2052 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2055 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2060 gen_op_iwmmxt_movq_wRn_M0(wrd);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2064 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2065 wrd = (insn >> 12) & 0xf;
2066 rd0 = (insn >> 16) & 0xf;
2067 rd1 = (insn >> 0) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0);
2069 switch ((insn >> 22) & 3) {
2071 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2074 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2077 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2086 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 rd1 = (insn >> 0) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 if (insn & (1 << 22))
2092 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2094 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2095 if (!(insn & (1 << 20)))
2096 gen_op_iwmmxt_addl_M0_wRn(wrd);
2097 gen_op_iwmmxt_movq_wRn_M0(wrd);
2098 gen_op_iwmmxt_set_mup();
2100 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2101 wrd = (insn >> 12) & 0xf;
2102 rd0 = (insn >> 16) & 0xf;
2103 rd1 = (insn >> 0) & 0xf;
2104 gen_op_iwmmxt_movq_M0_wRn(rd0);
2105 if (insn & (1 << 21)) {
2106 if (insn & (1 << 20))
2107 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2109 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2111 if (insn & (1 << 20))
2112 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2114 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2119 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2120 wrd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 rd1 = (insn >> 0) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0);
2124 if (insn & (1 << 21))
2125 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2127 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2128 if (!(insn & (1 << 20))) {
2129 iwmmxt_load_reg(cpu_V1, wrd);
2130 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2132 gen_op_iwmmxt_movq_wRn_M0(wrd);
2133 gen_op_iwmmxt_set_mup();
2135 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2136 wrd = (insn >> 12) & 0xf;
2137 rd0 = (insn >> 16) & 0xf;
2138 rd1 = (insn >> 0) & 0xf;
2139 gen_op_iwmmxt_movq_M0_wRn(rd0);
2140 switch ((insn >> 22) & 3) {
2142 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2145 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2148 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2153 gen_op_iwmmxt_movq_wRn_M0(wrd);
2154 gen_op_iwmmxt_set_mup();
2155 gen_op_iwmmxt_set_cup();
2157 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2158 wrd = (insn >> 12) & 0xf;
2159 rd0 = (insn >> 16) & 0xf;
2160 rd1 = (insn >> 0) & 0xf;
2161 gen_op_iwmmxt_movq_M0_wRn(rd0);
2162 if (insn & (1 << 22)) {
2163 if (insn & (1 << 20))
2164 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2166 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2168 if (insn & (1 << 20))
2169 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2171 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2173 gen_op_iwmmxt_movq_wRn_M0(wrd);
2174 gen_op_iwmmxt_set_mup();
2175 gen_op_iwmmxt_set_cup();
2177 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2178 wrd = (insn >> 12) & 0xf;
2179 rd0 = (insn >> 16) & 0xf;
2180 rd1 = (insn >> 0) & 0xf;
2181 gen_op_iwmmxt_movq_M0_wRn(rd0);
2182 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2183 tcg_gen_andi_i32(tmp, tmp, 7);
2184 iwmmxt_load_reg(cpu_V1, rd1);
2185 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2186 tcg_temp_free_i32(tmp);
2187 gen_op_iwmmxt_movq_wRn_M0(wrd);
2188 gen_op_iwmmxt_set_mup();
2190 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2191 if (((insn >> 6) & 3) == 3)
2193 rd = (insn >> 12) & 0xf;
2194 wrd = (insn >> 16) & 0xf;
2195 tmp = load_reg(s, rd);
2196 gen_op_iwmmxt_movq_M0_wRn(wrd);
2197 switch ((insn >> 6) & 3) {
2199 tmp2 = tcg_const_i32(0xff);
2200 tmp3 = tcg_const_i32((insn & 7) << 3);
2203 tmp2 = tcg_const_i32(0xffff);
2204 tmp3 = tcg_const_i32((insn & 3) << 4);
2207 tmp2 = tcg_const_i32(0xffffffff);
2208 tmp3 = tcg_const_i32((insn & 1) << 5);
2214 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2215 tcg_temp_free_i32(tmp3);
2216 tcg_temp_free_i32(tmp2);
2217 tcg_temp_free_i32(tmp);
2218 gen_op_iwmmxt_movq_wRn_M0(wrd);
2219 gen_op_iwmmxt_set_mup();
2221 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2222 rd = (insn >> 12) & 0xf;
2223 wrd = (insn >> 16) & 0xf;
2224 if (rd == 15 || ((insn >> 22) & 3) == 3)
2226 gen_op_iwmmxt_movq_M0_wRn(wrd);
2227 tmp = tcg_temp_new_i32();
2228 switch ((insn >> 22) & 3) {
2230 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2231 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2233 tcg_gen_ext8s_i32(tmp, tmp);
2235 tcg_gen_andi_i32(tmp, tmp, 0xff);
2239 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2240 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2242 tcg_gen_ext16s_i32(tmp, tmp);
2244 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2248 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2249 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2252 store_reg(s, rd, tmp);
2254 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2255 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2257 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2258 switch ((insn >> 22) & 3) {
2260 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2263 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2266 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2269 tcg_gen_shli_i32(tmp, tmp, 28);
2271 tcg_temp_free_i32(tmp);
2273 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2274 if (((insn >> 6) & 3) == 3)
2276 rd = (insn >> 12) & 0xf;
2277 wrd = (insn >> 16) & 0xf;
2278 tmp = load_reg(s, rd);
2279 switch ((insn >> 6) & 3) {
2281 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2284 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2287 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2290 tcg_temp_free_i32(tmp);
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2294 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2295 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2297 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2298 tmp2 = tcg_temp_new_i32();
2299 tcg_gen_mov_i32(tmp2, tmp);
2300 switch ((insn >> 22) & 3) {
2302 for (i = 0; i < 7; i ++) {
2303 tcg_gen_shli_i32(tmp2, tmp2, 4);
2304 tcg_gen_and_i32(tmp, tmp, tmp2);
2308 for (i = 0; i < 3; i ++) {
2309 tcg_gen_shli_i32(tmp2, tmp2, 8);
2310 tcg_gen_and_i32(tmp, tmp, tmp2);
2314 tcg_gen_shli_i32(tmp2, tmp2, 16);
2315 tcg_gen_and_i32(tmp, tmp, tmp2);
2319 tcg_temp_free_i32(tmp2);
2320 tcg_temp_free_i32(tmp);
2322 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2323 wrd = (insn >> 12) & 0xf;
2324 rd0 = (insn >> 16) & 0xf;
2325 gen_op_iwmmxt_movq_M0_wRn(rd0);
2326 switch ((insn >> 22) & 3) {
2328 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2331 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2334 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2339 gen_op_iwmmxt_movq_wRn_M0(wrd);
2340 gen_op_iwmmxt_set_mup();
2342 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2343 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2345 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2346 tmp2 = tcg_temp_new_i32();
2347 tcg_gen_mov_i32(tmp2, tmp);
2348 switch ((insn >> 22) & 3) {
2350 for (i = 0; i < 7; i ++) {
2351 tcg_gen_shli_i32(tmp2, tmp2, 4);
2352 tcg_gen_or_i32(tmp, tmp, tmp2);
2356 for (i = 0; i < 3; i ++) {
2357 tcg_gen_shli_i32(tmp2, tmp2, 8);
2358 tcg_gen_or_i32(tmp, tmp, tmp2);
2362 tcg_gen_shli_i32(tmp2, tmp2, 16);
2363 tcg_gen_or_i32(tmp, tmp, tmp2);
2367 tcg_temp_free_i32(tmp2);
2368 tcg_temp_free_i32(tmp);
2370 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2371 rd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
2373 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2375 gen_op_iwmmxt_movq_M0_wRn(rd0);
2376 tmp = tcg_temp_new_i32();
2377 switch ((insn >> 22) & 3) {
2379 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2382 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2385 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2388 store_reg(s, rd, tmp);
2390 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2391 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2392 wrd = (insn >> 12) & 0xf;
2393 rd0 = (insn >> 16) & 0xf;
2394 rd1 = (insn >> 0) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
2396 switch ((insn >> 22) & 3) {
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2401 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2407 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2410 if (insn & (1 << 21))
2411 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2413 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2418 gen_op_iwmmxt_movq_wRn_M0(wrd);
2419 gen_op_iwmmxt_set_mup();
2420 gen_op_iwmmxt_set_cup();
2422 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2423 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2424 wrd = (insn >> 12) & 0xf;
2425 rd0 = (insn >> 16) & 0xf;
2426 gen_op_iwmmxt_movq_M0_wRn(rd0);
2427 switch ((insn >> 22) & 3) {
2429 if (insn & (1 << 21))
2430 gen_op_iwmmxt_unpacklsb_M0();
2432 gen_op_iwmmxt_unpacklub_M0();
2435 if (insn & (1 << 21))
2436 gen_op_iwmmxt_unpacklsw_M0();
2438 gen_op_iwmmxt_unpackluw_M0();
2441 if (insn & (1 << 21))
2442 gen_op_iwmmxt_unpacklsl_M0();
2444 gen_op_iwmmxt_unpacklul_M0();
2449 gen_op_iwmmxt_movq_wRn_M0(wrd);
2450 gen_op_iwmmxt_set_mup();
2451 gen_op_iwmmxt_set_cup();
2453 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2454 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2455 wrd = (insn >> 12) & 0xf;
2456 rd0 = (insn >> 16) & 0xf;
2457 gen_op_iwmmxt_movq_M0_wRn(rd0);
2458 switch ((insn >> 22) & 3) {
2460 if (insn & (1 << 21))
2461 gen_op_iwmmxt_unpackhsb_M0();
2463 gen_op_iwmmxt_unpackhub_M0();
2466 if (insn & (1 << 21))
2467 gen_op_iwmmxt_unpackhsw_M0();
2469 gen_op_iwmmxt_unpackhuw_M0();
2472 if (insn & (1 << 21))
2473 gen_op_iwmmxt_unpackhsl_M0();
2475 gen_op_iwmmxt_unpackhul_M0();
2480 gen_op_iwmmxt_movq_wRn_M0(wrd);
2481 gen_op_iwmmxt_set_mup();
2482 gen_op_iwmmxt_set_cup();
2484 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2485 case 0x214: case 0x614: case 0xa14: case 0xe14:
2486 if (((insn >> 22) & 3) == 0)
2488 wrd = (insn >> 12) & 0xf;
2489 rd0 = (insn >> 16) & 0xf;
2490 gen_op_iwmmxt_movq_M0_wRn(rd0);
2491 tmp = tcg_temp_new_i32();
2492 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2493 tcg_temp_free_i32(tmp);
2496 switch ((insn >> 22) & 3) {
2498 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2501 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2504 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2507 tcg_temp_free_i32(tmp);
2508 gen_op_iwmmxt_movq_wRn_M0(wrd);
2509 gen_op_iwmmxt_set_mup();
2510 gen_op_iwmmxt_set_cup();
2512 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2513 case 0x014: case 0x414: case 0x814: case 0xc14:
2514 if (((insn >> 22) & 3) == 0)
2516 wrd = (insn >> 12) & 0xf;
2517 rd0 = (insn >> 16) & 0xf;
2518 gen_op_iwmmxt_movq_M0_wRn(rd0);
2519 tmp = tcg_temp_new_i32();
2520 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2521 tcg_temp_free_i32(tmp);
2524 switch ((insn >> 22) & 3) {
2526 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2529 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2532 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2535 tcg_temp_free_i32(tmp);
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2540 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2541 case 0x114: case 0x514: case 0x914: case 0xd14:
2542 if (((insn >> 22) & 3) == 0)
2544 wrd = (insn >> 12) & 0xf;
2545 rd0 = (insn >> 16) & 0xf;
2546 gen_op_iwmmxt_movq_M0_wRn(rd0);
2547 tmp = tcg_temp_new_i32();
2548 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2549 tcg_temp_free_i32(tmp);
2552 switch ((insn >> 22) & 3) {
2554 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2557 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2560 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2563 tcg_temp_free_i32(tmp);
2564 gen_op_iwmmxt_movq_wRn_M0(wrd);
2565 gen_op_iwmmxt_set_mup();
2566 gen_op_iwmmxt_set_cup();
2568 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2569 case 0x314: case 0x714: case 0xb14: case 0xf14:
2570 if (((insn >> 22) & 3) == 0)
2572 wrd = (insn >> 12) & 0xf;
2573 rd0 = (insn >> 16) & 0xf;
2574 gen_op_iwmmxt_movq_M0_wRn(rd0);
2575 tmp = tcg_temp_new_i32();
2576 switch ((insn >> 22) & 3) {
2578 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2579 tcg_temp_free_i32(tmp);
2582 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2585 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2586 tcg_temp_free_i32(tmp);
2589 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2592 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2593 tcg_temp_free_i32(tmp);
2596 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2599 tcg_temp_free_i32(tmp);
2600 gen_op_iwmmxt_movq_wRn_M0(wrd);
2601 gen_op_iwmmxt_set_mup();
2602 gen_op_iwmmxt_set_cup();
2604 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2605 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 rd1 = (insn >> 0) & 0xf;
2609 gen_op_iwmmxt_movq_M0_wRn(rd0);
2610 switch ((insn >> 22) & 3) {
2612 if (insn & (1 << 21))
2613 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2615 gen_op_iwmmxt_minub_M0_wRn(rd1);
2618 if (insn & (1 << 21))
2619 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2621 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2624 if (insn & (1 << 21))
2625 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2627 gen_op_iwmmxt_minul_M0_wRn(rd1);
2632 gen_op_iwmmxt_movq_wRn_M0(wrd);
2633 gen_op_iwmmxt_set_mup();
2635 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2636 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2637 wrd = (insn >> 12) & 0xf;
2638 rd0 = (insn >> 16) & 0xf;
2639 rd1 = (insn >> 0) & 0xf;
2640 gen_op_iwmmxt_movq_M0_wRn(rd0);
2641 switch ((insn >> 22) & 3) {
2643 if (insn & (1 << 21))
2644 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2646 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2649 if (insn & (1 << 21))
2650 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2652 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2655 if (insn & (1 << 21))
2656 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2658 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2663 gen_op_iwmmxt_movq_wRn_M0(wrd);
2664 gen_op_iwmmxt_set_mup();
2666 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2667 case 0x402: case 0x502: case 0x602: case 0x702:
2668 wrd = (insn >> 12) & 0xf;
2669 rd0 = (insn >> 16) & 0xf;
2670 rd1 = (insn >> 0) & 0xf;
2671 gen_op_iwmmxt_movq_M0_wRn(rd0);
2672 tmp = tcg_const_i32((insn >> 20) & 3);
2673 iwmmxt_load_reg(cpu_V1, rd1);
2674 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2675 tcg_temp_free_i32(tmp);
2676 gen_op_iwmmxt_movq_wRn_M0(wrd);
2677 gen_op_iwmmxt_set_mup();
2679 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2680 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2681 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2682 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2683 wrd = (insn >> 12) & 0xf;
2684 rd0 = (insn >> 16) & 0xf;
2685 rd1 = (insn >> 0) & 0xf;
2686 gen_op_iwmmxt_movq_M0_wRn(rd0);
2687 switch ((insn >> 20) & 0xf) {
2689 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2692 gen_op_iwmmxt_subub_M0_wRn(rd1);
2695 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2698 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2701 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2704 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2707 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2710 gen_op_iwmmxt_subul_M0_wRn(rd1);
2713 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2718 gen_op_iwmmxt_movq_wRn_M0(wrd);
2719 gen_op_iwmmxt_set_mup();
2720 gen_op_iwmmxt_set_cup();
2722 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2723 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2724 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2725 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2726 wrd = (insn >> 12) & 0xf;
2727 rd0 = (insn >> 16) & 0xf;
2728 gen_op_iwmmxt_movq_M0_wRn(rd0);
2729 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2730 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2731 tcg_temp_free_i32(tmp);
2732 gen_op_iwmmxt_movq_wRn_M0(wrd);
2733 gen_op_iwmmxt_set_mup();
2734 gen_op_iwmmxt_set_cup();
2736 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2737 case 0x418: case 0x518: case 0x618: case 0x718:
2738 case 0x818: case 0x918: case 0xa18: case 0xb18:
2739 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2740 wrd = (insn >> 12) & 0xf;
2741 rd0 = (insn >> 16) & 0xf;
2742 rd1 = (insn >> 0) & 0xf;
2743 gen_op_iwmmxt_movq_M0_wRn(rd0);
2744 switch ((insn >> 20) & 0xf) {
2746 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2749 gen_op_iwmmxt_addub_M0_wRn(rd1);
2752 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2755 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2758 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2761 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2764 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2767 gen_op_iwmmxt_addul_M0_wRn(rd1);
2770 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2775 gen_op_iwmmxt_movq_wRn_M0(wrd);
2776 gen_op_iwmmxt_set_mup();
2777 gen_op_iwmmxt_set_cup();
2779 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2780 case 0x408: case 0x508: case 0x608: case 0x708:
2781 case 0x808: case 0x908: case 0xa08: case 0xb08:
2782 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2783 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2785 wrd = (insn >> 12) & 0xf;
2786 rd0 = (insn >> 16) & 0xf;
2787 rd1 = (insn >> 0) & 0xf;
2788 gen_op_iwmmxt_movq_M0_wRn(rd0);
2789 switch ((insn >> 22) & 3) {
2791 if (insn & (1 << 21))
2792 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2794 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2797 if (insn & (1 << 21))
2798 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2800 gen_op_iwmmxt_packul_M0_wRn(rd1);
2803 if (insn & (1 << 21))
2804 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2806 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2809 gen_op_iwmmxt_movq_wRn_M0(wrd);
2810 gen_op_iwmmxt_set_mup();
2811 gen_op_iwmmxt_set_cup();
2813 case 0x201: case 0x203: case 0x205: case 0x207:
2814 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2815 case 0x211: case 0x213: case 0x215: case 0x217:
2816 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2817 wrd = (insn >> 5) & 0xf;
2818 rd0 = (insn >> 12) & 0xf;
2819 rd1 = (insn >> 0) & 0xf;
2820 if (rd0 == 0xf || rd1 == 0xf)
2822 gen_op_iwmmxt_movq_M0_wRn(wrd);
2823 tmp = load_reg(s, rd0);
2824 tmp2 = load_reg(s, rd1);
2825 switch ((insn >> 16) & 0xf) {
2826 case 0x0: /* TMIA */
2827 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2829 case 0x8: /* TMIAPH */
2830 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2832 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2833 if (insn & (1 << 16))
2834 tcg_gen_shri_i32(tmp, tmp, 16);
2835 if (insn & (1 << 17))
2836 tcg_gen_shri_i32(tmp2, tmp2, 16);
2837 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2840 tcg_temp_free_i32(tmp2);
2841 tcg_temp_free_i32(tmp);
2844 tcg_temp_free_i32(tmp2);
2845 tcg_temp_free_i32(tmp);
2846 gen_op_iwmmxt_movq_wRn_M0(wrd);
2847 gen_op_iwmmxt_set_mup();
2856 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2857 (i.e. an undefined instruction). */
2858 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2860 int acc, rd0, rd1, rdhi, rdlo;
2863 if ((insn & 0x0ff00f10) == 0x0e200010) {
2864 /* Multiply with Internal Accumulate Format */
2865 rd0 = (insn >> 12) & 0xf;
2867 acc = (insn >> 5) & 7;
2872 tmp = load_reg(s, rd0);
2873 tmp2 = load_reg(s, rd1);
2874 switch ((insn >> 16) & 0xf) {
2876 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2878 case 0x8: /* MIAPH */
2879 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2881 case 0xc: /* MIABB */
2882 case 0xd: /* MIABT */
2883 case 0xe: /* MIATB */
2884 case 0xf: /* MIATT */
2885 if (insn & (1 << 16))
2886 tcg_gen_shri_i32(tmp, tmp, 16);
2887 if (insn & (1 << 17))
2888 tcg_gen_shri_i32(tmp2, tmp2, 16);
2889 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2894 tcg_temp_free_i32(tmp2);
2895 tcg_temp_free_i32(tmp);
2897 gen_op_iwmmxt_movq_wRn_M0(acc);
2901 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2902 /* Internal Accumulator Access Format */
2903 rdhi = (insn >> 16) & 0xf;
2904 rdlo = (insn >> 12) & 0xf;
2910 if (insn & ARM_CP_RW_BIT) { /* MRA */
2911 iwmmxt_load_reg(cpu_V0, acc);
2912 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2913 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2914 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
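/* The XScale accumulators are 40 bits wide, so only the low 8 bits
 * of the high word read back here are significant.
 */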
2915 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2917 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2918 iwmmxt_store_reg(cpu_V0, acc);
2926 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2927 #define VFP_SREG(insn, bigbit, smallbit) \
2928 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2929 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2930 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2931 reg = (((insn) >> (bigbit)) & 0x0f) \
2932 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2934 if (insn & (1 << (smallbit))) \
2936 reg = ((insn) >> (bigbit)) & 0x0f; \
2939 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2940 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2941 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2942 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2943 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2944 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
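/* As an example of the encoding handled above: for VFP_SREG_D the four
 * bits at [15:12] form the top bits of the S register number and bit 22
 * supplies the bottom bit, so bits [15:12] = 0x3 with bit 22 set decodes
 * to S7; VFP_DREG_D instead treats bit 22 as the top bit, giving D19 for
 * the same encoding.
 */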
2946 /* Move between integer and VFP cores. */
2947 static TCGv_i32 gen_vfp_mrs(void)
2949 TCGv_i32 tmp = tcg_temp_new_i32();
2950 tcg_gen_mov_i32(tmp, cpu_F0s);
2954 static void gen_vfp_msr(TCGv_i32 tmp)
2956 tcg_gen_mov_i32(cpu_F0s, tmp);
2957 tcg_temp_free_i32(tmp);
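/* The gen_neon_dup_* helpers below replicate one byte or halfword of
 * 'var' across the whole 32-bit value: e.g. gen_neon_dup_u8(var, 0)
 * turns 0x000000ab into 0xabababab, and gen_neon_dup_high16 turns
 * 0xabcd1234 into 0xabcdabcd.
 */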
2960 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2962 TCGv_i32 tmp = tcg_temp_new_i32();
2964 tcg_gen_shri_i32(var, var, shift);
2965 tcg_gen_ext8u_i32(var, var);
2966 tcg_gen_shli_i32(tmp, var, 8);
2967 tcg_gen_or_i32(var, var, tmp);
2968 tcg_gen_shli_i32(tmp, var, 16);
2969 tcg_gen_or_i32(var, var, tmp);
2970 tcg_temp_free_i32(tmp);
2973 static void gen_neon_dup_low16(TCGv_i32 var)
2975 TCGv_i32 tmp = tcg_temp_new_i32();
2976 tcg_gen_ext16u_i32(var, var);
2977 tcg_gen_shli_i32(tmp, var, 16);
2978 tcg_gen_or_i32(var, var, tmp);
2979 tcg_temp_free_i32(tmp);
2982 static void gen_neon_dup_high16(TCGv_i32 var)
2984 TCGv_i32 tmp = tcg_temp_new_i32();
2985 tcg_gen_andi_i32(var, var, 0xffff0000);
2986 tcg_gen_shri_i32(tmp, var, 16);
2987 tcg_gen_or_i32(var, var, tmp);
2988 tcg_temp_free_i32(tmp);
2991 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2993 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2994 TCGv_i32 tmp = tcg_temp_new_i32();
2997 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2998 gen_neon_dup_u8(tmp, 0);
3001 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
3002 gen_neon_dup_low16(tmp);
3005 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
3007 default: /* Avoid compiler warnings. */
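/* handle_vsel() implements the v8 VSEL instruction. Bits [21:20] of the
 * insn encode the condition (0 = EQ, 1 = VS, 2 = GE, 3 = GT). Remember
 * that cpu_ZF is zero exactly when the Z flag is set, and that cpu_NF
 * and cpu_VF keep their flag in bit 31, which is why the movcond
 * comparisons below test the sign or zeroness of those values directly.
 */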
3013 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
3016 uint32_t cc = extract32(insn, 20, 2);
3019 TCGv_i64 frn, frm, dest;
3020 TCGv_i64 tmp, zero, zf, nf, vf;
3022 zero = tcg_const_i64(0);
3024 frn = tcg_temp_new_i64();
3025 frm = tcg_temp_new_i64();
3026 dest = tcg_temp_new_i64();
3028 zf = tcg_temp_new_i64();
3029 nf = tcg_temp_new_i64();
3030 vf = tcg_temp_new_i64();
3032 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3033 tcg_gen_ext_i32_i64(nf, cpu_NF);
3034 tcg_gen_ext_i32_i64(vf, cpu_VF);
3036 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3037 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3040 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3044 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3047 case 2: /* ge: N == V -> N ^ V == 0 */
3048 tmp = tcg_temp_new_i64();
3049 tcg_gen_xor_i64(tmp, vf, nf);
3050 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3052 tcg_temp_free_i64(tmp);
3054 case 3: /* gt: !Z && N == V */
3055 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3057 tmp = tcg_temp_new_i64();
3058 tcg_gen_xor_i64(tmp, vf, nf);
3059 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3061 tcg_temp_free_i64(tmp);
3064 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3065 tcg_temp_free_i64(frn);
3066 tcg_temp_free_i64(frm);
3067 tcg_temp_free_i64(dest);
3069 tcg_temp_free_i64(zf);
3070 tcg_temp_free_i64(nf);
3071 tcg_temp_free_i64(vf);
3073 tcg_temp_free_i64(zero);
3075 TCGv_i32 frn, frm, dest;
3078 zero = tcg_const_i32(0);
3080 frn = tcg_temp_new_i32();
3081 frm = tcg_temp_new_i32();
3082 dest = tcg_temp_new_i32();
3083 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3084 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3087 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3091 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3094 case 2: /* ge: N == V -> N ^ V == 0 */
3095 tmp = tcg_temp_new_i32();
3096 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3097 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3099 tcg_temp_free_i32(tmp);
3101 case 3: /* gt: !Z && N == V */
3102 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3104 tmp = tcg_temp_new_i32();
3105 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3106 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3108 tcg_temp_free_i32(tmp);
3111 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3112 tcg_temp_free_i32(frn);
3113 tcg_temp_free_i32(frm);
3114 tcg_temp_free_i32(dest);
3116 tcg_temp_free_i32(zero);
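/* VMAXNM/VMINNM: bit 6 of the insn selects min vs max. These use the
 * IEEE 754-2008 minNum()/maxNum() semantics, so a single quiet NaN
 * input yields the other (numerical) operand.
 */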
3122 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3123 uint32_t rm, uint32_t dp)
3125 uint32_t vmin = extract32(insn, 6, 1);
3126 TCGv_ptr fpst = get_fpstatus_ptr(0);
3129 TCGv_i64 frn, frm, dest;
3131 frn = tcg_temp_new_i64();
3132 frm = tcg_temp_new_i64();
3133 dest = tcg_temp_new_i64();
3135 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3136 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3138 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
3140 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
3142 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3143 tcg_temp_free_i64(frn);
3144 tcg_temp_free_i64(frm);
3145 tcg_temp_free_i64(dest);
3147 TCGv_i32 frn, frm, dest;
3149 frn = tcg_temp_new_i32();
3150 frm = tcg_temp_new_i32();
3151 dest = tcg_temp_new_i32();
3153 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3154 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3156 gen_helper_vfp_minnums(dest, frn, frm, fpst);
3158 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
3160 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3161 tcg_temp_free_i32(frn);
3162 tcg_temp_free_i32(frm);
3163 tcg_temp_free_i32(dest);
3166 tcg_temp_free_ptr(fpst);
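/* Note the rounding-mode dance used below (and in handle_vcvt): the
 * first gen_helper_set_rmode() call installs the requested mode in the
 * fp status and hands back the previous mode in the same temporary,
 * and the second call restores that original mode afterwards.
 */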
3170 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3173 TCGv_ptr fpst = get_fpstatus_ptr(0);
3176 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3177 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3182 tcg_op = tcg_temp_new_i64();
3183 tcg_res = tcg_temp_new_i64();
3184 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3185 gen_helper_rintd(tcg_res, tcg_op, fpst);
3186 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3187 tcg_temp_free_i64(tcg_op);
3188 tcg_temp_free_i64(tcg_res);
3192 tcg_op = tcg_temp_new_i32();
3193 tcg_res = tcg_temp_new_i32();
3194 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3195 gen_helper_rints(tcg_res, tcg_op, fpst);
3196 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3197 tcg_temp_free_i32(tcg_op);
3198 tcg_temp_free_i32(tcg_res);
3201 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3202 tcg_temp_free_i32(tcg_rmode);
3204 tcg_temp_free_ptr(fpst);
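/* handle_vcvt() covers VCVTA/VCVTN/VCVTP/VCVTM: float-to-integer
 * conversion with an explicitly encoded rounding mode; bit 7 of the
 * insn selects a signed (1) or unsigned (0) result.
 */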
3208 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3211 bool is_signed = extract32(insn, 7, 1);
3212 TCGv_ptr fpst = get_fpstatus_ptr(0);
3213 TCGv_i32 tcg_rmode, tcg_shift;
3215 tcg_shift = tcg_const_i32(0);
3217 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3218 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3221 TCGv_i64 tcg_double, tcg_res;
3223 /* Rd is encoded as a single precision register even when the source
3224 * is double precision.
3226 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3227 tcg_double = tcg_temp_new_i64();
3228 tcg_res = tcg_temp_new_i64();
3229 tcg_tmp = tcg_temp_new_i32();
3230 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3232 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3234 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3236 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3237 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3238 tcg_temp_free_i32(tcg_tmp);
3239 tcg_temp_free_i64(tcg_res);
3240 tcg_temp_free_i64(tcg_double);
3242 TCGv_i32 tcg_single, tcg_res;
3243 tcg_single = tcg_temp_new_i32();
3244 tcg_res = tcg_temp_new_i32();
3245 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3247 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3249 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3251 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3252 tcg_temp_free_i32(tcg_res);
3253 tcg_temp_free_i32(tcg_single);
3256 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3257 tcg_temp_free_i32(tcg_rmode);
3259 tcg_temp_free_i32(tcg_shift);
3261 tcg_temp_free_ptr(fpst);
3266 /* Table for converting the most common AArch32 encoding of
3267 * rounding mode to arm_fprounding order (which matches the
3268 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3270 static const uint8_t fp_decode_rm[] = {
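    /* Per the FPDecodeRM() pseudocode the two RM bits map as:
     * 0b00 -> TIEAWAY, 0b01 -> TIEEVEN, 0b10 -> POSINF, 0b11 -> NEGINF.
     */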
3277 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3279 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3281 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3286 VFP_DREG_D(rd, insn);
3287 VFP_DREG_N(rn, insn);
3288 VFP_DREG_M(rm, insn);
3290 rd = VFP_SREG_D(insn);
3291 rn = VFP_SREG_N(insn);
3292 rm = VFP_SREG_M(insn);
3295 if ((insn & 0x0f800e50) == 0x0e000a00) {
3296 return handle_vsel(insn, rd, rn, rm, dp);
3297 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3298 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3299 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3300 /* VRINTA, VRINTN, VRINTP, VRINTM */
3301 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3302 return handle_vrint(insn, rd, rm, dp, rounding);
3303 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3304 /* VCVTA, VCVTN, VCVTP, VCVTM */
3305 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3306 return handle_vcvt(insn, rd, rm, dp, rounding);
3311 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3312 (i.e. an undefined instruction). */
3313 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3315 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3321 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3325 /* FIXME: this access check should not take precedence over UNDEF
3326 * for invalid encodings; we will generate incorrect syndrome information
3327 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3329 if (s->fp_excp_el) {
3330 gen_exception_insn(s, 4, EXCP_UDEF,
3331 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3335 if (!s->vfp_enabled) {
3336 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3337 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3339 rn = (insn >> 16) & 0xf;
3340 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3341 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3346 if (extract32(insn, 28, 4) == 0xf) {
3347 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3348 * only used in v8 and above.
3350 return disas_vfp_v8_insn(s, insn);
3353 dp = ((insn & 0xf00) == 0xb00);
3354 switch ((insn >> 24) & 0xf) {
3356 if (insn & (1 << 4)) {
3357 /* single register transfer */
3358 rd = (insn >> 12) & 0xf;
3363 VFP_DREG_N(rn, insn);
3366 if (insn & 0x00c00060
3367 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3371 pass = (insn >> 21) & 1;
3372 if (insn & (1 << 22)) {
3374 offset = ((insn >> 5) & 3) * 8;
3375 } else if (insn & (1 << 5)) {
3377 offset = (insn & (1 << 6)) ? 16 : 0;
3382 if (insn & ARM_CP_RW_BIT) {
3384 tmp = neon_load_reg(rn, pass);
3388 tcg_gen_shri_i32(tmp, tmp, offset);
3389 if (insn & (1 << 23))
3395 if (insn & (1 << 23)) {
3397 tcg_gen_shri_i32(tmp, tmp, 16);
3403 tcg_gen_sari_i32(tmp, tmp, 16);
3412 store_reg(s, rd, tmp);
3415 tmp = load_reg(s, rd);
3416 if (insn & (1 << 23)) {
3419 gen_neon_dup_u8(tmp, 0);
3420 } else if (size == 1) {
3421 gen_neon_dup_low16(tmp);
3423 for (n = 0; n <= pass * 2; n++) {
3424 tmp2 = tcg_temp_new_i32();
3425 tcg_gen_mov_i32(tmp2, tmp);
3426 neon_store_reg(rn, n, tmp2);
3428 neon_store_reg(rn, n, tmp);
3433 tmp2 = neon_load_reg(rn, pass);
3434 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3435 tcg_temp_free_i32(tmp2);
3438 tmp2 = neon_load_reg(rn, pass);
3439 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3440 tcg_temp_free_i32(tmp2);
3445 neon_store_reg(rn, pass, tmp);
3449 if ((insn & 0x6f) != 0x00)
3451 rn = VFP_SREG_N(insn);
3452 if (insn & ARM_CP_RW_BIT) {
3454 if (insn & (1 << 21)) {
3455 /* system register */
3460 /* VFP2 allows access to FSID from userspace.
3461 VFP3 restricts all id registers to privileged accesses. */
3464 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3467 tmp = load_cpu_field(vfp.xregs[rn]);
3472 tmp = load_cpu_field(vfp.xregs[rn]);
3474 case ARM_VFP_FPINST:
3475 case ARM_VFP_FPINST2:
3476 /* Not present in VFP3. */
3478 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3481 tmp = load_cpu_field(vfp.xregs[rn]);
3485 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3486 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3488 tmp = tcg_temp_new_i32();
3489 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3493 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3500 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3503 tmp = load_cpu_field(vfp.xregs[rn]);
3509 gen_mov_F0_vreg(0, rn);
3510 tmp = gen_vfp_mrs();
3513 /* Set the 4 flag bits in the CPSR. */
3515 tcg_temp_free_i32(tmp);
3517 store_reg(s, rd, tmp);
3521 if (insn & (1 << 21)) {
3523 /* system register */
3528 /* Writes are ignored. */
3531 tmp = load_reg(s, rd);
3532 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3533 tcg_temp_free_i32(tmp);
3539 /* TODO: VFP subarchitecture support.
3540 * For now, keep the EN bit only */
3541 tmp = load_reg(s, rd);
3542 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3543 store_cpu_field(tmp, vfp.xregs[rn]);
3546 case ARM_VFP_FPINST:
3547 case ARM_VFP_FPINST2:
3551 tmp = load_reg(s, rd);
3552 store_cpu_field(tmp, vfp.xregs[rn]);
3558 tmp = load_reg(s, rd);
3560 gen_mov_vreg_F0(0, rn);
3565 /* data processing */
3566 /* The opcode is in bits 23, 21, 20 and 6. */
3567 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3571 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3573 /* rn is register number */
3574 VFP_DREG_N(rn, insn);
3577 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3578 ((rn & 0x1e) == 0x6))) {
3579 /* Integer or single/half precision destination. */
3580 rd = VFP_SREG_D(insn);
3582 VFP_DREG_D(rd, insn);
3585 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3586 ((rn & 0x1e) == 0x4))) {
3587 /* VCVT from int or half precision is always from S reg
3588 * regardless of dp bit. VCVT with immediate frac_bits
3589 * has same format as SREG_M.
3591 rm = VFP_SREG_M(insn);
3593 VFP_DREG_M(rm, insn);
3596 rn = VFP_SREG_N(insn);
3597 if (op == 15 && rn == 15) {
3598 /* Double precision destination. */
3599 VFP_DREG_D(rd, insn);
3601 rd = VFP_SREG_D(insn);
3603 /* NB that we implicitly rely on the encoding for the frac_bits
3604 * in VCVT of fixed to float being the same as that of an SREG_M
3606 rm = VFP_SREG_M(insn);
3609 veclen = s->vec_len;
3610 if (op == 15 && rn > 3)
3613 /* Shut up compiler warnings. */
3624 /* Figure out what type of vector operation this is. */
3625 if ((rd & bank_mask) == 0) {
3630 delta_d = (s->vec_stride >> 1) + 1;
3632 delta_d = s->vec_stride + 1;
3634 if ((rm & bank_mask) == 0) {
3635 /* mixed scalar/vector */
3644 /* Load the initial operands. */
3649 /* Integer source */
3650 gen_mov_F0_vreg(0, rm);
3655 gen_mov_F0_vreg(dp, rd);
3656 gen_mov_F1_vreg(dp, rm);
3660 /* Compare with zero */
3661 gen_mov_F0_vreg(dp, rd);
3672 /* Source and destination the same. */
3673 gen_mov_F0_vreg(dp, rd);
3679 /* VCVTB, VCVTT: only present with the halfprec extension
3680 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3681 * (we choose to UNDEF)
3683 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3684 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3687 if (!extract32(rn, 1, 1)) {
3688 /* Half precision source. */
3689 gen_mov_F0_vreg(0, rm);
3692 /* Otherwise fall through */
3694 /* One source operand. */
3695 gen_mov_F0_vreg(dp, rm);
3699 /* Two source operands. */
3700 gen_mov_F0_vreg(dp, rn);
3701 gen_mov_F1_vreg(dp, rm);
3705 /* Perform the calculation. */
3707 case 0: /* VMLA: fd + (fn * fm) */
3708 /* Note that order of inputs to the add matters for NaNs */
3710 gen_mov_F0_vreg(dp, rd);
3713 case 1: /* VMLS: fd + -(fn * fm) */
3716 gen_mov_F0_vreg(dp, rd);
3719 case 2: /* VNMLS: -fd + (fn * fm) */
3720 /* Note that it isn't valid to replace (-A + B) with (B - A)
3721 * or similar plausible looking simplifications
3722 * because this will give wrong results for NaNs.
3725 gen_mov_F0_vreg(dp, rd);
3729 case 3: /* VNMLA: -fd + -(fn * fm) */
3732 gen_mov_F0_vreg(dp, rd);
3736 case 4: /* mul: fn * fm */
3739 case 5: /* nmul: -(fn * fm) */
3743 case 6: /* add: fn + fm */
3746 case 7: /* sub: fn - fm */
3749 case 8: /* div: fn / fm */
3752 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3753 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3754 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3755 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3756 /* These are fused multiply-add, and must be done as one
3757 * floating point operation with no rounding between the
3758 * multiplication and addition steps.
3759 * NB that doing the negations here as separate steps is
3760 * correct: an input NaN should come out with its sign bit
3761 * flipped if it is a negated input.
3763 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3771 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3773 frd = tcg_temp_new_i64();
3774 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3777 gen_helper_vfp_negd(frd, frd);
3779 fpst = get_fpstatus_ptr(0);
3780 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3781 cpu_F1d, frd, fpst);
3782 tcg_temp_free_ptr(fpst);
3783 tcg_temp_free_i64(frd);
3789 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3791 frd = tcg_temp_new_i32();
3792 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3794 gen_helper_vfp_negs(frd, frd);
3796 fpst = get_fpstatus_ptr(0);
3797 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3798 cpu_F1s, frd, fpst);
3799 tcg_temp_free_ptr(fpst);
3800 tcg_temp_free_i32(frd);
3803 case 14: /* fconst */
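/* The 8-bit immediate encoded in the insn is expanded into a full
 * single- or double-precision constant (sign bit, exponent derived
 * from one replicated bit, 4-bit fraction), as described by the
 * VFPExpandImm() pseudocode.
 */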
3804 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3808 n = (insn << 12) & 0x80000000;
3809 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3816 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3823 tcg_gen_movi_i32(cpu_F0s, n);
3826 case 15: /* extension space */
3840 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3842 TCGv_ptr fpst = get_fpstatus_ptr(false);
3843 TCGv_i32 ahp_mode = get_ahp_flag();
3844 tmp = gen_vfp_mrs();
3845 tcg_gen_ext16u_i32(tmp, tmp);
3847 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3850 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3853 tcg_temp_free_i32(ahp_mode);
3854 tcg_temp_free_ptr(fpst);
3855 tcg_temp_free_i32(tmp);
3858 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3860 TCGv_ptr fpst = get_fpstatus_ptr(false);
3861 TCGv_i32 ahp = get_ahp_flag();
3862 tmp = gen_vfp_mrs();
3863 tcg_gen_shri_i32(tmp, tmp, 16);
3865 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3868 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3871 tcg_temp_free_i32(tmp);
3872 tcg_temp_free_i32(ahp);
3873 tcg_temp_free_ptr(fpst);
3876 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3878 TCGv_ptr fpst = get_fpstatus_ptr(false);
3879 TCGv_i32 ahp = get_ahp_flag();
3880 tmp = tcg_temp_new_i32();
3883 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3886 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3889 tcg_temp_free_i32(ahp);
3890 tcg_temp_free_ptr(fpst);
3891 gen_mov_F0_vreg(0, rd);
3892 tmp2 = gen_vfp_mrs();
3893 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3894 tcg_gen_or_i32(tmp, tmp, tmp2);
3895 tcg_temp_free_i32(tmp2);
3899 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3901 TCGv_ptr fpst = get_fpstatus_ptr(false);
3902 TCGv_i32 ahp = get_ahp_flag();
3903 tmp = tcg_temp_new_i32();
3905 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3908 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3911 tcg_temp_free_i32(ahp);
3912 tcg_temp_free_ptr(fpst);
3913 tcg_gen_shli_i32(tmp, tmp, 16);
3914 gen_mov_F0_vreg(0, rd);
3915 tmp2 = gen_vfp_mrs();
3916 tcg_gen_ext16u_i32(tmp2, tmp2);
3917 tcg_gen_or_i32(tmp, tmp, tmp2);
3918 tcg_temp_free_i32(tmp2);
3931 case 11: /* cmpez */
3935 case 12: /* vrintr */
3937 TCGv_ptr fpst = get_fpstatus_ptr(0);
3939 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3941 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3943 tcg_temp_free_ptr(fpst);
3946 case 13: /* vrintz */
3948 TCGv_ptr fpst = get_fpstatus_ptr(0);
3950 tcg_rmode = tcg_const_i32(float_round_to_zero);
3951 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3953 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3955 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3957 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3958 tcg_temp_free_i32(tcg_rmode);
3959 tcg_temp_free_ptr(fpst);
3962 case 14: /* vrintx */
3964 TCGv_ptr fpst = get_fpstatus_ptr(0);
3966 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3968 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3970 tcg_temp_free_ptr(fpst);
3973 case 15: /* single<->double conversion */
3975 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3977 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3979 case 16: /* fuito */
3980 gen_vfp_uito(dp, 0);
3982 case 17: /* fsito */
3983 gen_vfp_sito(dp, 0);
3985 case 20: /* fshto */
3986 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3989 gen_vfp_shto(dp, 16 - rm, 0);
3991 case 21: /* fslto */
3992 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3995 gen_vfp_slto(dp, 32 - rm, 0);
3997 case 22: /* fuhto */
3998 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4001 gen_vfp_uhto(dp, 16 - rm, 0);
4003 case 23: /* fulto */
4004 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4007 gen_vfp_ulto(dp, 32 - rm, 0);
4009 case 24: /* ftoui */
4010 gen_vfp_toui(dp, 0);
4012 case 25: /* ftouiz */
4013 gen_vfp_touiz(dp, 0);
4015 case 26: /* ftosi */
4016 gen_vfp_tosi(dp, 0);
4018 case 27: /* ftosiz */
4019 gen_vfp_tosiz(dp, 0);
4021 case 28: /* ftosh */
4022 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4025 gen_vfp_tosh(dp, 16 - rm, 0);
4027 case 29: /* ftosl */
4028 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4031 gen_vfp_tosl(dp, 32 - rm, 0);
4033 case 30: /* ftouh */
4034 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4037 gen_vfp_touh(dp, 16 - rm, 0);
4039 case 31: /* ftoul */
4040 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4043 gen_vfp_toul(dp, 32 - rm, 0);
4045 default: /* undefined */
4049 default: /* undefined */
4053 /* Write back the result. */
4054 if (op == 15 && (rn >= 8 && rn <= 11)) {
4055 /* Comparison, do nothing. */
4056 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4057 (rn & 0x1e) == 0x6)) {
4058 /* VCVT double to int: always integer result.
4059 * VCVT double to half precision is always a single precision result. */
4062 gen_mov_vreg_F0(0, rd);
4063 } else if (op == 15 && rn == 15) {
4065 gen_mov_vreg_F0(!dp, rd);
4067 gen_mov_vreg_F0(dp, rd);
4070 /* break out of the loop if we have finished */
4074 if (op == 15 && delta_m == 0) {
4075 /* single source one-many */
4077 rd = ((rd + delta_d) & (bank_mask - 1))
4079 gen_mov_vreg_F0(dp, rd);
4083 /* Setup the next operands. */
4085 rd = ((rd + delta_d) & (bank_mask - 1))
4089 /* One source operand. */
4090 rm = ((rm + delta_m) & (bank_mask - 1))
4092 gen_mov_F0_vreg(dp, rm);
4094 /* Two source operands. */
4095 rn = ((rn + delta_d) & (bank_mask - 1))
4097 gen_mov_F0_vreg(dp, rn);
4099 rm = ((rm + delta_m) & (bank_mask - 1))
4101 gen_mov_F1_vreg(dp, rm);
4109 if ((insn & 0x03e00000) == 0x00400000) {
4110 /* two-register transfer */
4111 rn = (insn >> 16) & 0xf;
4112 rd = (insn >> 12) & 0xf;
4114 VFP_DREG_M(rm, insn);
4116 rm = VFP_SREG_M(insn);
4119 if (insn & ARM_CP_RW_BIT) {
4122 gen_mov_F0_vreg(0, rm * 2);
4123 tmp = gen_vfp_mrs();
4124 store_reg(s, rd, tmp);
4125 gen_mov_F0_vreg(0, rm * 2 + 1);
4126 tmp = gen_vfp_mrs();
4127 store_reg(s, rn, tmp);
4129 gen_mov_F0_vreg(0, rm);
4130 tmp = gen_vfp_mrs();
4131 store_reg(s, rd, tmp);
4132 gen_mov_F0_vreg(0, rm + 1);
4133 tmp = gen_vfp_mrs();
4134 store_reg(s, rn, tmp);
4139 tmp = load_reg(s, rd);
4141 gen_mov_vreg_F0(0, rm * 2);
4142 tmp = load_reg(s, rn);
4144 gen_mov_vreg_F0(0, rm * 2 + 1);
4146 tmp = load_reg(s, rd);
4148 gen_mov_vreg_F0(0, rm);
4149 tmp = load_reg(s, rn);
4151 gen_mov_vreg_F0(0, rm + 1);
4156 rn = (insn >> 16) & 0xf;
4158 VFP_DREG_D(rd, insn);
4160 rd = VFP_SREG_D(insn);
4161 if ((insn & 0x01200000) == 0x01000000) {
4162 /* Single load/store */
4163 offset = (insn & 0xff) << 2;
4164 if ((insn & (1 << 23)) == 0)
4166 if (s->thumb && rn == 15) {
4167 /* This is actually UNPREDICTABLE */
4168 addr = tcg_temp_new_i32();
4169 tcg_gen_movi_i32(addr, s->pc & ~2);
4171 addr = load_reg(s, rn);
4173 tcg_gen_addi_i32(addr, addr, offset);
4174 if (insn & (1 << 20)) {
4175 gen_vfp_ld(s, dp, addr);
4176 gen_mov_vreg_F0(dp, rd);
4178 gen_mov_F0_vreg(dp, rd);
4179 gen_vfp_st(s, dp, addr);
4181 tcg_temp_free_i32(addr);
4183 /* load/store multiple */
4184 int w = insn & (1 << 21);
4186 n = (insn >> 1) & 0x7f;
4190 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4191 /* P == U , W == 1 => UNDEF */
4194 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4195 /* UNPREDICTABLE cases for bad immediates: we choose to
4196 * UNDEF to avoid generating huge numbers of TCG ops
4200 if (rn == 15 && w) {
4201 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4205 if (s->thumb && rn == 15) {
4206 /* This is actually UNPREDICTABLE */
4207 addr = tcg_temp_new_i32();
4208 tcg_gen_movi_i32(addr, s->pc & ~2);
4210 addr = load_reg(s, rn);
4212 if (insn & (1 << 24)) /* pre-decrement */
4213 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4219 for (i = 0; i < n; i++) {
4220 if (insn & ARM_CP_RW_BIT) {
4222 gen_vfp_ld(s, dp, addr);
4223 gen_mov_vreg_F0(dp, rd + i);
4226 gen_mov_F0_vreg(dp, rd + i);
4227 gen_vfp_st(s, dp, addr);
4229 tcg_gen_addi_i32(addr, addr, offset);
4233 if (insn & (1 << 24))
4234 offset = -offset * n;
4235 else if (dp && (insn & 1))
4241 tcg_gen_addi_i32(addr, addr, offset);
4242 store_reg(s, rn, addr);
4244 tcg_temp_free_i32(addr);
4250 /* Should never happen. */
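/* Direct block chaining via goto_tb is only allowed when the
 * destination lies on the same guest page as the start of the TB or
 * as the current instruction (so it is covered by the same page
 * protection checks); otherwise gen_goto_tb() just sets the PC and
 * exits via an indirect lookup.
 */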
4256 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4258 #ifndef CONFIG_USER_ONLY
4259 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4260 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4266 static void gen_goto_ptr(void)
4268 tcg_gen_lookup_and_goto_ptr();
4271 /* This will end the TB but doesn't guarantee we'll return to
4272 * cpu_loop_exec. Any live exit_requests will be processed as we
4273 * enter the next TB.
4275 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4277 if (use_goto_tb(s, dest)) {
4279 gen_set_pc_im(s, dest);
4280 tcg_gen_exit_tb(s->base.tb, n);
4282 gen_set_pc_im(s, dest);
4285 s->base.is_jmp = DISAS_NORETURN;
4288 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4290 if (unlikely(is_singlestepping(s))) {
4291 /* An indirect jump so that we still trigger the debug exception. */
4296 gen_goto_tb(s, 0, dest);
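/* gen_mulxy() implements the 16x16->32 signed multiply used by
 * SMULxy/SMLAxy and friends: x and y select the top halfword
 * (arithmetic shift right by 16) or the sign-extended bottom halfword
 * of t0 and t1 respectively before the multiply.
 */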
4300 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4303 tcg_gen_sari_i32(t0, t0, 16);
4307 tcg_gen_sari_i32(t1, t1, 16);
4310 tcg_gen_mul_i32(t0, t0, t1);
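/* Note on the MSR field mask computed by msr_mask() below: flags bits
 * 0..3 correspond to the CPSR c/x/s/f bytes, i.e. bits [7:0], [15:8],
 * [23:16] and [31:24] respectively.
 */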
4313 /* Return the mask of PSR bits set by a MSR instruction. */
4314 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4319 if (flags & (1 << 0))
4321 if (flags & (1 << 1))
4323 if (flags & (1 << 2))
4325 if (flags & (1 << 3))
4328 /* Mask out undefined bits. */
4329 mask &= ~CPSR_RESERVED;
4330 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4333 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4334 mask &= ~CPSR_Q; /* V5TE in reality */
4336 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4337 mask &= ~(CPSR_E | CPSR_GE);
4339 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4342 /* Mask out execution state and reserved bits. */
4344 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4346 /* Mask out privileged bits. */
4352 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4353 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4357 /* ??? This is also undefined in system mode. */
4361 tmp = load_cpu_field(spsr);
4362 tcg_gen_andi_i32(tmp, tmp, ~mask);
4363 tcg_gen_andi_i32(t0, t0, mask);
4364 tcg_gen_or_i32(tmp, tmp, t0);
4365 store_cpu_field(tmp, spsr);
4367 gen_set_cpsr(t0, mask);
4369 tcg_temp_free_i32(t0);
4374 /* Returns nonzero if access to the PSR is not permitted. */
4375 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4378 tmp = tcg_temp_new_i32();
4379 tcg_gen_movi_i32(tmp, val);
4380 return gen_set_psr(s, mask, spsr, tmp);
4383 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4384 int *tgtmode, int *regno)
4386 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4387 * the target mode and register number, and identify the various
4388 * unpredictable cases.
4389 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4390 * + executed in user mode
4391 * + using R15 as the src/dest register
4392 * + accessing an unimplemented register
4393 * + accessing a register that's inaccessible at current PL/security state*
4394 * + accessing a register that you could access with a different insn
4395 * We choose to UNDEF in all these cases.
4396 * Since we don't know which of the various AArch32 modes we are in
4397 * we have to defer some checks to runtime.
4398 * Accesses to Monitor mode registers from Secure EL1 (which implies
4399 * that EL3 is AArch64) must trap to EL3.
4401 * If the access checks fail this function will emit code to take
4402 * an exception and return false. Otherwise it will return true,
4403 * and set *tgtmode and *regno appropriately.
4405 int exc_target = default_exception_el(s);
4407 /* These instructions are present only in ARMv8, or in ARMv7 with the
4408 * Virtualization Extensions.
4410 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4411 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4415 if (IS_USER(s) || rn == 15) {
4419 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4420 * of registers into (r, sysm).
4423 /* SPSRs for other modes */
4425 case 0xe: /* SPSR_fiq */
4426 *tgtmode = ARM_CPU_MODE_FIQ;
4428 case 0x10: /* SPSR_irq */
4429 *tgtmode = ARM_CPU_MODE_IRQ;
4431 case 0x12: /* SPSR_svc */
4432 *tgtmode = ARM_CPU_MODE_SVC;
4434 case 0x14: /* SPSR_abt */
4435 *tgtmode = ARM_CPU_MODE_ABT;
4437 case 0x16: /* SPSR_und */
4438 *tgtmode = ARM_CPU_MODE_UND;
4440 case 0x1c: /* SPSR_mon */
4441 *tgtmode = ARM_CPU_MODE_MON;
4443 case 0x1e: /* SPSR_hyp */
4444 *tgtmode = ARM_CPU_MODE_HYP;
4446 default: /* unallocated */
4449 /* We arbitrarily assign SPSR a register number of 16. */
4452 /* general purpose registers for other modes */
4454 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4455 *tgtmode = ARM_CPU_MODE_USR;
4458 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4459 *tgtmode = ARM_CPU_MODE_FIQ;
4462 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4463 *tgtmode = ARM_CPU_MODE_IRQ;
4464 *regno = sysm & 1 ? 13 : 14;
4466 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4467 *tgtmode = ARM_CPU_MODE_SVC;
4468 *regno = sysm & 1 ? 13 : 14;
4470 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4471 *tgtmode = ARM_CPU_MODE_ABT;
4472 *regno = sysm & 1 ? 13 : 14;
4474 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4475 *tgtmode = ARM_CPU_MODE_UND;
4476 *regno = sysm & 1 ? 13 : 14;
4478 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4479 *tgtmode = ARM_CPU_MODE_MON;
4480 *regno = sysm & 1 ? 13 : 14;
4482 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4483 *tgtmode = ARM_CPU_MODE_HYP;
4484 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4485 *regno = sysm & 1 ? 13 : 17;
4487 default: /* unallocated */
4492 /* Catch the 'accessing inaccessible register' cases we can detect
4493 * at translate time.
4496 case ARM_CPU_MODE_MON:
4497 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4500 if (s->current_el == 1) {
4501 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4502 * then accesses to Mon registers trap to EL3
4508 case ARM_CPU_MODE_HYP:
4509 /* Note that we can forbid accesses from EL2 here because they
4510 * must be from Hyp mode itself
4512 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4523 /* If we get here then some access check did not pass */
4524 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4528 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4530 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4531 int tgtmode = 0, regno = 0;
4533 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4537 /* Sync state because msr_banked() can raise exceptions */
4538 gen_set_condexec(s);
4539 gen_set_pc_im(s, s->pc - 4);
4540 tcg_reg = load_reg(s, rn);
4541 tcg_tgtmode = tcg_const_i32(tgtmode);
4542 tcg_regno = tcg_const_i32(regno);
4543 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4544 tcg_temp_free_i32(tcg_tgtmode);
4545 tcg_temp_free_i32(tcg_regno);
4546 tcg_temp_free_i32(tcg_reg);
4547 s->base.is_jmp = DISAS_UPDATE;
4550 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4552 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4553 int tgtmode = 0, regno = 0;
4555 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4559 /* Sync state because mrs_banked() can raise exceptions */
4560 gen_set_condexec(s);
4561 gen_set_pc_im(s, s->pc - 4);
4562 tcg_reg = tcg_temp_new_i32();
4563 tcg_tgtmode = tcg_const_i32(tgtmode);
4564 tcg_regno = tcg_const_i32(regno);
4565 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4566 tcg_temp_free_i32(tcg_tgtmode);
4567 tcg_temp_free_i32(tcg_regno);
4568 store_reg(s, rn, tcg_reg);
4569 s->base.is_jmp = DISAS_UPDATE;
4572 /* Store value to PC as for an exception return (i.e. don't
4573 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4574 * will do the masking based on the new value of the Thumb bit.
4576 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
4578 tcg_gen_mov_i32(cpu_R[15], pc);
4579 tcg_temp_free_i32(pc);
4582 /* Generate a v6 exception return. Marks both values as dead. */
4583 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4585 store_pc_exc_ret(s, pc);
4586 /* The cpsr_write_eret helper will mask the low bits of PC
4587 * appropriately depending on the new Thumb bit, so it must
4588 * be called after storing the new PC.
4590 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4593 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4594 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4597 tcg_temp_free_i32(cpsr);
4598 /* Must exit loop to check un-masked IRQs */
4599 s->base.is_jmp = DISAS_EXIT;
4602 /* Generate an old-style exception return. Marks pc as dead. */
4603 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4605 gen_rfe(s, pc, load_cpu_field(spsr));
4609 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4610 * only call the helper when running single threaded TCG code to ensure
4611 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4612 * just skip this instruction. Currently the SEV/SEVL instructions
4613 * which are *one* of many ways to wake the CPU from WFE are not
4614 * implemented so we can't sleep like WFI does.
4616 static void gen_nop_hint(DisasContext *s, int val)
4619 /* When running in MTTCG we don't generate jumps to the yield and
4620 * WFE helpers as it won't affect the scheduling of other vCPUs.
4621 * If we wanted to more completely model WFE/SEV so we don't busy
4622 * spin unnecessarily we would need to do something more involved.
4625 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4626 gen_set_pc_im(s, s->pc);
4627 s->base.is_jmp = DISAS_YIELD;
4631 gen_set_pc_im(s, s->pc);
4632 s->base.is_jmp = DISAS_WFI;
4635 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4636 gen_set_pc_im(s, s->pc);
4637 s->base.is_jmp = DISAS_WFE;
4642 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4648 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4650 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4653 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4654 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4655 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4660 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4663 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4664 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4665 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4670 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4671 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4672 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4673 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4674 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
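/* The two macros below dispatch on (size << 1) | u: 0/1 pick the
 * signed/unsigned 8-bit helper, 2/3 the 16-bit one, 4/5 the 32-bit one,
 * and any other value makes the enclosing function return 1 (UNDEF).
 */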
4676 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4677 switch ((size << 1) | u) { \
4679 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4682 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4685 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4688 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4691 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4694 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4696 default: return 1; \
4699 #define GEN_NEON_INTEGER_OP(name) do { \
4700 switch ((size << 1) | u) { \
4702 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4705 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4708 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4711 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4714 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4717 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4719 default: return 1; \
4722 static TCGv_i32 neon_load_scratch(int scratch)
4724 TCGv_i32 tmp = tcg_temp_new_i32();
4725 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4729 static void neon_store_scratch(int scratch, TCGv_i32 var)
4731 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4732 tcg_temp_free_i32(var);
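/* neon_get_scalar() fetches the scalar operand for Neon "by scalar"
 * operations: the low bits of 'reg' select the D register and the
 * remaining bits the element within it; a 16-bit scalar is duplicated
 * into both halves of the returned 32-bit temporary.
 */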
4735 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4739 tmp = neon_load_reg(reg & 7, reg >> 4);
4741 gen_neon_dup_high16(tmp);
4743 gen_neon_dup_low16(tmp);
4746 tmp = neon_load_reg(reg & 15, reg >> 4);
4751 static int gen_neon_unzip(int rd, int rm, int size, int q)
4755 if (!q && size == 2) {
4758 pd = vfp_reg_ptr(true, rd);
4759 pm = vfp_reg_ptr(true, rm);
4763 gen_helper_neon_qunzip8(pd, pm);
4766 gen_helper_neon_qunzip16(pd, pm);
4769 gen_helper_neon_qunzip32(pd, pm);
4777 gen_helper_neon_unzip8(pd, pm);
4780 gen_helper_neon_unzip16(pd, pm);
4786 tcg_temp_free_ptr(pd);
4787 tcg_temp_free_ptr(pm);
4791 static int gen_neon_zip(int rd, int rm, int size, int q)
4795 if (!q && size == 2) {
4798 pd = vfp_reg_ptr(true, rd);
4799 pm = vfp_reg_ptr(true, rm);
4803 gen_helper_neon_qzip8(pd, pm);
4806 gen_helper_neon_qzip16(pd, pm);
4809 gen_helper_neon_qzip32(pd, pm);
4817 gen_helper_neon_zip8(pd, pm);
4820 gen_helper_neon_zip16(pd, pm);
4826 tcg_temp_free_ptr(pd);
4827 tcg_temp_free_ptr(pm);
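/* The trn helpers below implement one 32-bit lane of the VTRN element
 * transpose for 8-bit and 16-bit elements.
 */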
4831 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4835 rd = tcg_temp_new_i32();
4836 tmp = tcg_temp_new_i32();
4838 tcg_gen_shli_i32(rd, t0, 8);
4839 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4840 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4841 tcg_gen_or_i32(rd, rd, tmp);
4843 tcg_gen_shri_i32(t1, t1, 8);
4844 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4845 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4846 tcg_gen_or_i32(t1, t1, tmp);
4847 tcg_gen_mov_i32(t0, rd);
4849 tcg_temp_free_i32(tmp);
4850 tcg_temp_free_i32(rd);
4853 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4857 rd = tcg_temp_new_i32();
4858 tmp = tcg_temp_new_i32();
4860 tcg_gen_shli_i32(rd, t0, 16);
4861 tcg_gen_andi_i32(tmp, t1, 0xffff);
4862 tcg_gen_or_i32(rd, rd, tmp);
4863 tcg_gen_shri_i32(t1, t1, 16);
4864 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4865 tcg_gen_or_i32(t1, t1, tmp);
4866 tcg_gen_mov_i32(t0, rd);
4868 tcg_temp_free_i32(tmp);
4869 tcg_temp_free_i32(rd);
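/* For each VLD/VST "multiple structures" op value, the table below
 * gives how many D registers are accessed (nregs), the element
 * interleave factor and the register spacing used by
 * disas_neon_ls_insn().
 */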
4877 } neon_ls_element_type[11] = {
4891 /* Translate a NEON load/store element instruction. Return nonzero if the
4892 instruction is invalid. */
4893 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4912 /* FIXME: this access check should not take precedence over UNDEF
4913 * for invalid encodings; we will generate incorrect syndrome information
4914 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4916 if (s->fp_excp_el) {
4917 gen_exception_insn(s, 4, EXCP_UDEF,
4918 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
4922 if (!s->vfp_enabled)
4924 VFP_DREG_D(rd, insn);
4925 rn = (insn >> 16) & 0xf;
4927 load = (insn & (1 << 21)) != 0;
4928 if ((insn & (1 << 23)) == 0) {
4929 /* Load store all elements. */
4930 op = (insn >> 8) & 0xf;
4931 size = (insn >> 6) & 3;
4934 /* Catch UNDEF cases for bad values of align field */
4937 if (((insn >> 5) & 1) == 1) {
4942 if (((insn >> 4) & 3) == 3) {
4949 nregs = neon_ls_element_type[op].nregs;
4950 interleave = neon_ls_element_type[op].interleave;
4951 spacing = neon_ls_element_type[op].spacing;
4952 if (size == 3 && (interleave | spacing) != 1)
4954 addr = tcg_temp_new_i32();
4955 load_reg_var(s, addr, rn);
4956 stride = (1 << size) * interleave;
4957 for (reg = 0; reg < nregs; reg++) {
4958 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4959 load_reg_var(s, addr, rn);
4960 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4961 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4962 load_reg_var(s, addr, rn);
4963 tcg_gen_addi_i32(addr, addr, 1 << size);
4966 tmp64 = tcg_temp_new_i64();
4968 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
4969 neon_store_reg64(tmp64, rd);
4971 neon_load_reg64(tmp64, rd);
4972 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
4974 tcg_temp_free_i64(tmp64);
4975 tcg_gen_addi_i32(addr, addr, stride);
4977 for (pass = 0; pass < 2; pass++) {
4980 tmp = tcg_temp_new_i32();
4981 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4982 neon_store_reg(rd, pass, tmp);
4984 tmp = neon_load_reg(rd, pass);
4985 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4986 tcg_temp_free_i32(tmp);
4988 tcg_gen_addi_i32(addr, addr, stride);
4989 } else if (size == 1) {
4991 tmp = tcg_temp_new_i32();
4992 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4993 tcg_gen_addi_i32(addr, addr, stride);
4994 tmp2 = tcg_temp_new_i32();
4995 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
4996 tcg_gen_addi_i32(addr, addr, stride);
4997 tcg_gen_shli_i32(tmp2, tmp2, 16);
4998 tcg_gen_or_i32(tmp, tmp, tmp2);
4999 tcg_temp_free_i32(tmp2);
5000 neon_store_reg(rd, pass, tmp);
5002 tmp = neon_load_reg(rd, pass);
5003 tmp2 = tcg_temp_new_i32();
5004 tcg_gen_shri_i32(tmp2, tmp, 16);
5005 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5006 tcg_temp_free_i32(tmp);
5007 tcg_gen_addi_i32(addr, addr, stride);
5008 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
5009 tcg_temp_free_i32(tmp2);
5010 tcg_gen_addi_i32(addr, addr, stride);
5012 } else /* size == 0 */ {
5015 for (n = 0; n < 4; n++) {
5016 tmp = tcg_temp_new_i32();
5017 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
5018 tcg_gen_addi_i32(addr, addr, stride);
5022 tcg_gen_shli_i32(tmp, tmp, n * 8);
5023 tcg_gen_or_i32(tmp2, tmp2, tmp);
5024 tcg_temp_free_i32(tmp);
5027 neon_store_reg(rd, pass, tmp2);
5029 tmp2 = neon_load_reg(rd, pass);
5030 for (n = 0; n < 4; n++) {
5031 tmp = tcg_temp_new_i32();
5033 tcg_gen_mov_i32(tmp, tmp2);
5035 tcg_gen_shri_i32(tmp, tmp2, n * 8);
5037 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
5038 tcg_temp_free_i32(tmp);
5039 tcg_gen_addi_i32(addr, addr, stride);
5041 tcg_temp_free_i32(tmp2);
5048 tcg_temp_free_i32(addr);
5051 size = (insn >> 10) & 3;
5053 /* Load single element to all lanes. */
5054 int a = (insn >> 4) & 1;
5058 size = (insn >> 6) & 3;
5059 nregs = ((insn >> 8) & 3) + 1;
5062 if (nregs != 4 || a == 0) {
5065 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5068 if (nregs == 1 && a == 1 && size == 0) {
5071 if (nregs == 3 && a == 1) {
5074 addr = tcg_temp_new_i32();
5075 load_reg_var(s, addr, rn);
5077 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5078 tmp = gen_load_and_replicate(s, addr, size);
5079 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5080 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5081 if (insn & (1 << 5)) {
5082 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5083 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5085 tcg_temp_free_i32(tmp);
5087 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5088 stride = (insn & (1 << 5)) ? 2 : 1;
5089 for (reg = 0; reg < nregs; reg++) {
5090 tmp = gen_load_and_replicate(s, addr, size);
5091 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5092 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5093 tcg_temp_free_i32(tmp);
5094 tcg_gen_addi_i32(addr, addr, 1 << size);
5098 tcg_temp_free_i32(addr);
5099 stride = (1 << size) * nregs;
5101 /* Single element. */
5102 int idx = (insn >> 4) & 0xf;
5103 pass = (insn >> 7) & 1;
5106 shift = ((insn >> 5) & 3) * 8;
5110 shift = ((insn >> 6) & 1) * 16;
5111 stride = (insn & (1 << 5)) ? 2 : 1;
5115 stride = (insn & (1 << 6)) ? 2 : 1;
5120 nregs = ((insn >> 8) & 3) + 1;
5121 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5124 if (((idx & (1 << size)) != 0) ||
5125 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5130 if ((idx & 1) != 0) {
5135 if (size == 2 && (idx & 2) != 0) {
5140 if ((size == 2) && ((idx & 3) == 3)) {
5147 if ((rd + stride * (nregs - 1)) > 31) {
5148 /* Attempts to write off the end of the register file
5149 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5150 * the neon_load_reg()/neon_store_reg() would access off the end of the array.
5154 addr = tcg_temp_new_i32();
5155 load_reg_var(s, addr, rn);
5156 for (reg = 0; reg < nregs; reg++) {
5158 tmp = tcg_temp_new_i32();
5161 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
5164 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
5167 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
5169 default: /* Avoid compiler warnings. */
5173 tmp2 = neon_load_reg(rd, pass);
5174 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5175 shift, size ? 16 : 8);
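/* Illustrative example, not part of the original source: for a byte load
 * (size == 0) into the top lane of this 32-bit pass, shift is 24 and the
 * deposit width is 8, so only bits [31:24] of the existing register value
 * are replaced and the other lanes are left untouched.
 */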
5176 tcg_temp_free_i32(tmp2);
5178 neon_store_reg(rd, pass, tmp);
5179 } else { /* Store */
5180 tmp = neon_load_reg(rd, pass);
5182 tcg_gen_shri_i32(tmp, tmp, shift);
5185 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
5188 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5191 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5194 tcg_temp_free_i32(tmp);
5197 tcg_gen_addi_i32(addr, addr, 1 << size);
5199 tcg_temp_free_i32(addr);
5200 stride = nregs * (1 << size);
5206 base = load_reg(s, rn);
5208 tcg_gen_addi_i32(base, base, stride);
5211 index = load_reg(s, rm);
5212 tcg_gen_add_i32(base, base, index);
5213 tcg_temp_free_i32(index);
5215 store_reg(s, rn, base);
5220 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
5221 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
5223 tcg_gen_and_i32(t, t, c);
5224 tcg_gen_andc_i32(f, f, c);
5225 tcg_gen_or_i32(dest, t, f);
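/* Worked example of the select identity, using plain host integers rather
 * than TCG values: with c = 0x00ff00ff, t = 0x11111111 and f = 0x22222222
 * the result is (t & c) | (f & ~c) = 0x22112211; every set bit of c picks
 * the corresponding bit of t and every clear bit picks the bit of f.
 */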
5228 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
5231 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5232 case 1: gen_helper_neon_narrow_u16(dest, src); break;
5233 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
5238 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5241 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5242 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5243 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
5248 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5251 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5252 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5253 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5258 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5261 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5262 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5263 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5268 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5274 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5275 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5280 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5281 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5288 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5289 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5294 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5295 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5302 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5306 case 0: gen_helper_neon_widen_u8(dest, src); break;
5307 case 1: gen_helper_neon_widen_u16(dest, src); break;
5308 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5313 case 0: gen_helper_neon_widen_s8(dest, src); break;
5314 case 1: gen_helper_neon_widen_s16(dest, src); break;
5315 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5319 tcg_temp_free_i32(src);
5322 static inline void gen_neon_addl(int size)
5325 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5326 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5327 case 2: tcg_gen_add_i64(CPU_V001); break;
5332 static inline void gen_neon_subl(int size)
5335 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5336 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5337 case 2: tcg_gen_sub_i64(CPU_V001); break;
5342 static inline void gen_neon_negl(TCGv_i64 var, int size)
5345 case 0: gen_helper_neon_negl_u16(var, var); break;
5346 case 1: gen_helper_neon_negl_u32(var, var); break;
5348 tcg_gen_neg_i64(var, var);
5354 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5357 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5358 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5363 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5368 switch ((size << 1) | u) {
5369 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5370 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5371 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5372 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5374 tmp = gen_muls_i64_i32(a, b);
5375 tcg_gen_mov_i64(dest, tmp);
5376 tcg_temp_free_i64(tmp);
5379 tmp = gen_mulu_i64_i32(a, b);
5380 tcg_gen_mov_i64(dest, tmp);
5381 tcg_temp_free_i64(tmp);
5386 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5387 Don't forget to clean them now. */
5389 tcg_temp_free_i32(a);
5390 tcg_temp_free_i32(b);
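/* Note added for clarity: the switch key above is (size << 1) | u, so keys
 * 0..3 select the signed/unsigned 8-bit and 16-bit widening multiply
 * helpers, while size == 2 uses the full 32x32->64 multiplies
 * gen_muls_i64_i32/gen_mulu_i64_i32 seen in the fall-through cases.
 */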
5394 static void gen_neon_narrow_op(int op, int u, int size,
5395 TCGv_i32 dest, TCGv_i64 src)
5399 gen_neon_unarrow_sats(size, dest, src);
5401 gen_neon_narrow(size, dest, src);
5405 gen_neon_narrow_satu(size, dest, src);
5407 gen_neon_narrow_sats(size, dest, src);
5412 /* Symbolic constants for op fields for Neon 3-register same-length.
5413 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5416 #define NEON_3R_VHADD 0
5417 #define NEON_3R_VQADD 1
5418 #define NEON_3R_VRHADD 2
5419 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5420 #define NEON_3R_VHSUB 4
5421 #define NEON_3R_VQSUB 5
5422 #define NEON_3R_VCGT 6
5423 #define NEON_3R_VCGE 7
5424 #define NEON_3R_VSHL 8
5425 #define NEON_3R_VQSHL 9
5426 #define NEON_3R_VRSHL 10
5427 #define NEON_3R_VQRSHL 11
5428 #define NEON_3R_VMAX 12
5429 #define NEON_3R_VMIN 13
5430 #define NEON_3R_VABD 14
5431 #define NEON_3R_VABA 15
5432 #define NEON_3R_VADD_VSUB 16
5433 #define NEON_3R_VTST_VCEQ 17
5434 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5435 #define NEON_3R_VMUL 19
5436 #define NEON_3R_VPMAX 20
5437 #define NEON_3R_VPMIN 21
5438 #define NEON_3R_VQDMULH_VQRDMULH 22
5439 #define NEON_3R_VPADD_VQRDMLAH 23
5440 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
5441 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
5442 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5443 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5444 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5445 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5446 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
5447 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
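/* Decode example, added for illustration: the op value is assembled later
 * as op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1), i.e. insn bits [11:8]
 * become op[4:1] and insn bit [4] becomes op[0].  An insn with bits
 * [11:8] = 0b1001 and bit [4] = 1 therefore decodes to op = 19, which is
 * NEON_3R_VMUL.
 */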
5449 static const uint8_t neon_3r_sizes[] = {
5450 [NEON_3R_VHADD] = 0x7,
5451 [NEON_3R_VQADD] = 0xf,
5452 [NEON_3R_VRHADD] = 0x7,
5453 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5454 [NEON_3R_VHSUB] = 0x7,
5455 [NEON_3R_VQSUB] = 0xf,
5456 [NEON_3R_VCGT] = 0x7,
5457 [NEON_3R_VCGE] = 0x7,
5458 [NEON_3R_VSHL] = 0xf,
5459 [NEON_3R_VQSHL] = 0xf,
5460 [NEON_3R_VRSHL] = 0xf,
5461 [NEON_3R_VQRSHL] = 0xf,
5462 [NEON_3R_VMAX] = 0x7,
5463 [NEON_3R_VMIN] = 0x7,
5464 [NEON_3R_VABD] = 0x7,
5465 [NEON_3R_VABA] = 0x7,
5466 [NEON_3R_VADD_VSUB] = 0xf,
5467 [NEON_3R_VTST_VCEQ] = 0x7,
5468 [NEON_3R_VML] = 0x7,
5469 [NEON_3R_VMUL] = 0x7,
5470 [NEON_3R_VPMAX] = 0x7,
5471 [NEON_3R_VPMIN] = 0x7,
5472 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5473 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
5474 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
5475 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
5476 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5477 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5478 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5479 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5480 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
5481 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
5484 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5485 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5488 #define NEON_2RM_VREV64 0
5489 #define NEON_2RM_VREV32 1
5490 #define NEON_2RM_VREV16 2
5491 #define NEON_2RM_VPADDL 4
5492 #define NEON_2RM_VPADDL_U 5
5493 #define NEON_2RM_AESE 6 /* Includes AESD */
5494 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5495 #define NEON_2RM_VCLS 8
5496 #define NEON_2RM_VCLZ 9
5497 #define NEON_2RM_VCNT 10
5498 #define NEON_2RM_VMVN 11
5499 #define NEON_2RM_VPADAL 12
5500 #define NEON_2RM_VPADAL_U 13
5501 #define NEON_2RM_VQABS 14
5502 #define NEON_2RM_VQNEG 15
5503 #define NEON_2RM_VCGT0 16
5504 #define NEON_2RM_VCGE0 17
5505 #define NEON_2RM_VCEQ0 18
5506 #define NEON_2RM_VCLE0 19
5507 #define NEON_2RM_VCLT0 20
5508 #define NEON_2RM_SHA1H 21
5509 #define NEON_2RM_VABS 22
5510 #define NEON_2RM_VNEG 23
5511 #define NEON_2RM_VCGT0_F 24
5512 #define NEON_2RM_VCGE0_F 25
5513 #define NEON_2RM_VCEQ0_F 26
5514 #define NEON_2RM_VCLE0_F 27
5515 #define NEON_2RM_VCLT0_F 28
5516 #define NEON_2RM_VABS_F 30
5517 #define NEON_2RM_VNEG_F 31
5518 #define NEON_2RM_VSWP 32
5519 #define NEON_2RM_VTRN 33
5520 #define NEON_2RM_VUZP 34
5521 #define NEON_2RM_VZIP 35
5522 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5523 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5524 #define NEON_2RM_VSHLL 38
5525 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5526 #define NEON_2RM_VRINTN 40
5527 #define NEON_2RM_VRINTX 41
5528 #define NEON_2RM_VRINTA 42
5529 #define NEON_2RM_VRINTZ 43
5530 #define NEON_2RM_VCVT_F16_F32 44
5531 #define NEON_2RM_VRINTM 45
5532 #define NEON_2RM_VCVT_F32_F16 46
5533 #define NEON_2RM_VRINTP 47
5534 #define NEON_2RM_VCVTAU 48
5535 #define NEON_2RM_VCVTAS 49
5536 #define NEON_2RM_VCVTNU 50
5537 #define NEON_2RM_VCVTNS 51
5538 #define NEON_2RM_VCVTPU 52
5539 #define NEON_2RM_VCVTPS 53
5540 #define NEON_2RM_VCVTMU 54
5541 #define NEON_2RM_VCVTMS 55
5542 #define NEON_2RM_VRECPE 56
5543 #define NEON_2RM_VRSQRTE 57
5544 #define NEON_2RM_VRECPE_F 58
5545 #define NEON_2RM_VRSQRTE_F 59
5546 #define NEON_2RM_VCVT_FS 60
5547 #define NEON_2RM_VCVT_FU 61
5548 #define NEON_2RM_VCVT_SF 62
5549 #define NEON_2RM_VCVT_UF 63
5551 static int neon_2rm_is_float_op(int op)
5553 /* Return true if this neon 2reg-misc op is float-to-float */
5554 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5555 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5556 op == NEON_2RM_VRINTM ||
5557 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5558 op >= NEON_2RM_VRECPE_F);
5561 static bool neon_2rm_is_v8_op(int op)
5563 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5565 case NEON_2RM_VRINTN:
5566 case NEON_2RM_VRINTA:
5567 case NEON_2RM_VRINTM:
5568 case NEON_2RM_VRINTP:
5569 case NEON_2RM_VRINTZ:
5570 case NEON_2RM_VRINTX:
5571 case NEON_2RM_VCVTAU:
5572 case NEON_2RM_VCVTAS:
5573 case NEON_2RM_VCVTNU:
5574 case NEON_2RM_VCVTNS:
5575 case NEON_2RM_VCVTPU:
5576 case NEON_2RM_VCVTPS:
5577 case NEON_2RM_VCVTMU:
5578 case NEON_2RM_VCVTMS:
5585 /* Each entry in this array has bit n set if the insn allows
5586 * size value n (otherwise it will UNDEF). Since unallocated
5587 * op values will have no bits set, they always UNDEF.
5589 static const uint8_t neon_2rm_sizes[] = {
5590 [NEON_2RM_VREV64] = 0x7,
5591 [NEON_2RM_VREV32] = 0x3,
5592 [NEON_2RM_VREV16] = 0x1,
5593 [NEON_2RM_VPADDL] = 0x7,
5594 [NEON_2RM_VPADDL_U] = 0x7,
5595 [NEON_2RM_AESE] = 0x1,
5596 [NEON_2RM_AESMC] = 0x1,
5597 [NEON_2RM_VCLS] = 0x7,
5598 [NEON_2RM_VCLZ] = 0x7,
5599 [NEON_2RM_VCNT] = 0x1,
5600 [NEON_2RM_VMVN] = 0x1,
5601 [NEON_2RM_VPADAL] = 0x7,
5602 [NEON_2RM_VPADAL_U] = 0x7,
5603 [NEON_2RM_VQABS] = 0x7,
5604 [NEON_2RM_VQNEG] = 0x7,
5605 [NEON_2RM_VCGT0] = 0x7,
5606 [NEON_2RM_VCGE0] = 0x7,
5607 [NEON_2RM_VCEQ0] = 0x7,
5608 [NEON_2RM_VCLE0] = 0x7,
5609 [NEON_2RM_VCLT0] = 0x7,
5610 [NEON_2RM_SHA1H] = 0x4,
5611 [NEON_2RM_VABS] = 0x7,
5612 [NEON_2RM_VNEG] = 0x7,
5613 [NEON_2RM_VCGT0_F] = 0x4,
5614 [NEON_2RM_VCGE0_F] = 0x4,
5615 [NEON_2RM_VCEQ0_F] = 0x4,
5616 [NEON_2RM_VCLE0_F] = 0x4,
5617 [NEON_2RM_VCLT0_F] = 0x4,
5618 [NEON_2RM_VABS_F] = 0x4,
5619 [NEON_2RM_VNEG_F] = 0x4,
5620 [NEON_2RM_VSWP] = 0x1,
5621 [NEON_2RM_VTRN] = 0x7,
5622 [NEON_2RM_VUZP] = 0x7,
5623 [NEON_2RM_VZIP] = 0x7,
5624 [NEON_2RM_VMOVN] = 0x7,
5625 [NEON_2RM_VQMOVN] = 0x7,
5626 [NEON_2RM_VSHLL] = 0x7,
5627 [NEON_2RM_SHA1SU1] = 0x4,
5628 [NEON_2RM_VRINTN] = 0x4,
5629 [NEON_2RM_VRINTX] = 0x4,
5630 [NEON_2RM_VRINTA] = 0x4,
5631 [NEON_2RM_VRINTZ] = 0x4,
5632 [NEON_2RM_VCVT_F16_F32] = 0x2,
5633 [NEON_2RM_VRINTM] = 0x4,
5634 [NEON_2RM_VCVT_F32_F16] = 0x2,
5635 [NEON_2RM_VRINTP] = 0x4,
5636 [NEON_2RM_VCVTAU] = 0x4,
5637 [NEON_2RM_VCVTAS] = 0x4,
5638 [NEON_2RM_VCVTNU] = 0x4,
5639 [NEON_2RM_VCVTNS] = 0x4,
5640 [NEON_2RM_VCVTPU] = 0x4,
5641 [NEON_2RM_VCVTPS] = 0x4,
5642 [NEON_2RM_VCVTMU] = 0x4,
5643 [NEON_2RM_VCVTMS] = 0x4,
5644 [NEON_2RM_VRECPE] = 0x4,
5645 [NEON_2RM_VRSQRTE] = 0x4,
5646 [NEON_2RM_VRECPE_F] = 0x4,
5647 [NEON_2RM_VRSQRTE_F] = 0x4,
5648 [NEON_2RM_VCVT_FS] = 0x4,
5649 [NEON_2RM_VCVT_FU] = 0x4,
5650 [NEON_2RM_VCVT_SF] = 0x4,
5651 [NEON_2RM_VCVT_UF] = 0x4,
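/* Example of reading the table, added for illustration: [NEON_2RM_VREV32]
 * is 0x3, so only size 0 and size 1 (8-bit and 16-bit elements) are
 * accepted and size == 2 UNDEFs; an entry of 0x4 accepts only size == 2,
 * which for the float ops means 32-bit elements.
 */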
5655 /* Expand v8.1 simd helper. */
5656 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5657 int q, int rd, int rn, int rm)
5659 if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
5660 int opr_sz = (1 + q) * 8;
5661 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5662 vfp_reg_offset(1, rn),
5663 vfp_reg_offset(1, rm), cpu_env,
5664 opr_sz, opr_sz, 0, fn);
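/* Note added for clarity: opr_sz is the operation size in bytes handed to
 * the gvec expander: 8 (one D register) when q == 0, 16 (a full Q
 * register) when q == 1, which is exactly what (1 + q) * 8 computes.
 */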
5670 /* Translate a NEON data processing instruction. Return nonzero if the
5671 instruction is invalid.
5672 We process data in a mixture of 32-bit and 64-bit chunks.
5673 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5675 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5687 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5688 TCGv_ptr ptr1, ptr2, ptr3;
5691 /* FIXME: this access check should not take precedence over UNDEF
5692 * for invalid encodings; we will generate incorrect syndrome information
5693 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5695 if (s->fp_excp_el) {
5696 gen_exception_insn(s, 4, EXCP_UDEF,
5697 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
5701 if (!s->vfp_enabled)
5703 q = (insn & (1 << 6)) != 0;
5704 u = (insn >> 24) & 1;
5705 VFP_DREG_D(rd, insn);
5706 VFP_DREG_N(rn, insn);
5707 VFP_DREG_M(rm, insn);
5708 size = (insn >> 20) & 3;
5709 if ((insn & (1 << 23)) == 0) {
5710 /* Three register same length. */
5711 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5712 /* Catch invalid op and bad size combinations: UNDEF */
5713 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5716 /* All insns of this form UNDEF for either this condition or the
5717 * superset of cases "Q==1"; we catch the latter later.
5719 if (q && ((rd | rn | rm) & 1)) {
5724 /* The SHA-1/SHA-256 3-register instructions require special
5725 * treatment here, as their size field is overloaded as an
5726 * op type selector, and they all consume their input in a single pass. */
5732 if (!u) { /* SHA-1 */
5733 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5736 ptr1 = vfp_reg_ptr(true, rd);
5737 ptr2 = vfp_reg_ptr(true, rn);
5738 ptr3 = vfp_reg_ptr(true, rm);
5739 tmp4 = tcg_const_i32(size);
5740 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
5741 tcg_temp_free_i32(tmp4);
5742 } else { /* SHA-256 */
5743 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5746 ptr1 = vfp_reg_ptr(true, rd);
5747 ptr2 = vfp_reg_ptr(true, rn);
5748 ptr3 = vfp_reg_ptr(true, rm);
5751 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
5754 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
5757 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
5761 tcg_temp_free_ptr(ptr1);
5762 tcg_temp_free_ptr(ptr2);
5763 tcg_temp_free_ptr(ptr3);
5766 case NEON_3R_VPADD_VQRDMLAH:
5773 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
5776 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
5781 case NEON_3R_VFM_VQRDMLSH:
5792 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
5795 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
5800 if (size == 3 && op != NEON_3R_LOGIC) {
5801 /* 64-bit element instructions. */
5802 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5803 neon_load_reg64(cpu_V0, rn + pass);
5804 neon_load_reg64(cpu_V1, rm + pass);
5808 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5811 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5817 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5820 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5826 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5828 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5833 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5836 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5842 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5844 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5847 case NEON_3R_VQRSHL:
5849 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5852 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5856 case NEON_3R_VADD_VSUB:
5858 tcg_gen_sub_i64(CPU_V001);
5860 tcg_gen_add_i64(CPU_V001);
5866 neon_store_reg64(cpu_V0, rd + pass);
5875 case NEON_3R_VQRSHL:
5878 /* Shift instruction operands are reversed. */
5884 case NEON_3R_VPADD_VQRDMLAH:
5889 case NEON_3R_FLOAT_ARITH:
5890 pairwise = (u && size < 2); /* if VPADD (float) */
5892 case NEON_3R_FLOAT_MINMAX:
5893 pairwise = u; /* if VPMIN/VPMAX (float) */
5895 case NEON_3R_FLOAT_CMP:
5897 /* no encoding for U=0 C=1x */
5901 case NEON_3R_FLOAT_ACMP:
5906 case NEON_3R_FLOAT_MISC:
5907 /* VMAXNM/VMINNM in ARMv8 */
5908 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5913 if (u && (size != 0)) {
5914 /* UNDEF on invalid size for polynomial subcase */
5918 case NEON_3R_VFM_VQRDMLSH:
5919 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
5927 if (pairwise && q) {
5928 /* All the pairwise insns UNDEF if Q is set */
5932 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5937 tmp = neon_load_reg(rn, 0);
5938 tmp2 = neon_load_reg(rn, 1);
5940 tmp = neon_load_reg(rm, 0);
5941 tmp2 = neon_load_reg(rm, 1);
5945 tmp = neon_load_reg(rn, pass);
5946 tmp2 = neon_load_reg(rm, pass);
5950 GEN_NEON_INTEGER_OP(hadd);
5953 GEN_NEON_INTEGER_OP_ENV(qadd);
5955 case NEON_3R_VRHADD:
5956 GEN_NEON_INTEGER_OP(rhadd);
5958 case NEON_3R_LOGIC: /* Logic ops. */
5959 switch ((u << 2) | size) {
5961 tcg_gen_and_i32(tmp, tmp, tmp2);
5964 tcg_gen_andc_i32(tmp, tmp, tmp2);
5967 tcg_gen_or_i32(tmp, tmp, tmp2);
5970 tcg_gen_orc_i32(tmp, tmp, tmp2);
5973 tcg_gen_xor_i32(tmp, tmp, tmp2);
5976 tmp3 = neon_load_reg(rd, pass);
5977 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5978 tcg_temp_free_i32(tmp3);
5981 tmp3 = neon_load_reg(rd, pass);
5982 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5983 tcg_temp_free_i32(tmp3);
5986 tmp3 = neon_load_reg(rd, pass);
5987 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5988 tcg_temp_free_i32(tmp3);
5993 GEN_NEON_INTEGER_OP(hsub);
5996 GEN_NEON_INTEGER_OP_ENV(qsub);
5999 GEN_NEON_INTEGER_OP(cgt);
6002 GEN_NEON_INTEGER_OP(cge);
6005 GEN_NEON_INTEGER_OP(shl);
6008 GEN_NEON_INTEGER_OP_ENV(qshl);
6011 GEN_NEON_INTEGER_OP(rshl);
6013 case NEON_3R_VQRSHL:
6014 GEN_NEON_INTEGER_OP_ENV(qrshl);
6017 GEN_NEON_INTEGER_OP(max);
6020 GEN_NEON_INTEGER_OP(min);
6023 GEN_NEON_INTEGER_OP(abd);
6026 GEN_NEON_INTEGER_OP(abd);
6027 tcg_temp_free_i32(tmp2);
6028 tmp2 = neon_load_reg(rd, pass);
6029 gen_neon_add(size, tmp, tmp2);
6031 case NEON_3R_VADD_VSUB:
6032 if (!u) { /* VADD */
6033 gen_neon_add(size, tmp, tmp2);
6036 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
6037 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
6038 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
6043 case NEON_3R_VTST_VCEQ:
6044 if (!u) { /* VTST */
6046 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
6047 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
6048 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
6053 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6054 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6055 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6060 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
6062 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6063 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6064 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6067 tcg_temp_free_i32(tmp2);
6068 tmp2 = neon_load_reg(rd, pass);
6070 gen_neon_rsb(size, tmp, tmp2);
6072 gen_neon_add(size, tmp, tmp2);
6076 if (u) { /* polynomial */
6077 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
6078 } else { /* Integer */
6080 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6081 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6082 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6088 GEN_NEON_INTEGER_OP(pmax);
6091 GEN_NEON_INTEGER_OP(pmin);
6093 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
6094 if (!u) { /* VQDMULH */
6097 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6100 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6104 } else { /* VQRDMULH */
6107 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6110 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6116 case NEON_3R_VPADD_VQRDMLAH:
6118 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6119 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6120 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
6124 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
6126 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6127 switch ((u << 2) | size) {
6130 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6133 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
6136 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
6141 tcg_temp_free_ptr(fpstatus);
6144 case NEON_3R_FLOAT_MULTIPLY:
6146 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6147 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6149 tcg_temp_free_i32(tmp2);
6150 tmp2 = neon_load_reg(rd, pass);
6152 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6154 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6157 tcg_temp_free_ptr(fpstatus);
6160 case NEON_3R_FLOAT_CMP:
6162 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6164 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6167 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6169 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6172 tcg_temp_free_ptr(fpstatus);
6175 case NEON_3R_FLOAT_ACMP:
6177 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6179 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6181 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6183 tcg_temp_free_ptr(fpstatus);
6186 case NEON_3R_FLOAT_MINMAX:
6188 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6190 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
6192 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
6194 tcg_temp_free_ptr(fpstatus);
6197 case NEON_3R_FLOAT_MISC:
6200 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6202 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
6204 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
6206 tcg_temp_free_ptr(fpstatus);
6209 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6211 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6215 case NEON_3R_VFM_VQRDMLSH:
6217 /* VFMA, VFMS: fused multiply-add */
6218 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6219 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6222 gen_helper_vfp_negs(tmp, tmp);
6224 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6225 tcg_temp_free_i32(tmp3);
6226 tcg_temp_free_ptr(fpstatus);
6232 tcg_temp_free_i32(tmp2);
6234 /* Save the result. For elementwise operations we can put it
6235 straight into the destination register. For pairwise operations
6236 we have to be careful to avoid clobbering the source operands. */
6237 if (pairwise && rd == rm) {
6238 neon_store_scratch(pass, tmp);
6240 neon_store_reg(rd, pass, tmp);
6244 if (pairwise && rd == rm) {
6245 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6246 tmp = neon_load_scratch(pass);
6247 neon_store_reg(rd, pass, tmp);
6250 /* End of 3 register same size operations. */
6251 } else if (insn & (1 << 4)) {
6252 if ((insn & 0x00380080) != 0) {
6253 /* Two registers and shift. */
6254 op = (insn >> 8) & 0xf;
6255 if (insn & (1 << 7)) {
6263 while ((insn & (1 << (size + 19))) == 0)
6266 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
6267 /* To avoid excessive duplication of ops we implement shift
6268 by immediate using the variable shift operations. */
6270 /* Shift by immediate:
6271 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
6272 if (q && ((rd | rm) & 1)) {
6275 if (!u && (op == 4 || op == 6)) {
6278 /* Right shifts are encoded as N - shift, where N is the
6279 element size in bits. */
6281 shift = shift - (1 << (size + 3));
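/* Worked example, added for illustration: for 8-bit elements (size == 0)
 * a right shift by 3 arrives with the masked field holding 8 - 3 = 5;
 * subtracting (1 << 3) yields -3, and the variable shift helpers treat a
 * negative count as a right shift by that amount.
 */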
6289 imm = (uint8_t) shift;
6294 imm = (uint16_t) shift;
6305 for (pass = 0; pass < count; pass++) {
6307 neon_load_reg64(cpu_V0, rm + pass);
6308 tcg_gen_movi_i64(cpu_V1, imm);
6313 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6315 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
6320 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6322 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6325 case 5: /* VSHL, VSLI */
6326 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6328 case 6: /* VQSHLU */
6329 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6334 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6337 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6342 if (op == 1 || op == 3) {
6344 neon_load_reg64(cpu_V1, rd + pass);
6345 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6346 } else if (op == 4 || (op == 5 && u)) {
6348 neon_load_reg64(cpu_V1, rd + pass);
6350 if (shift < -63 || shift > 63) {
6354 mask = 0xffffffffffffffffull >> -shift;
6356 mask = 0xffffffffffffffffull << shift;
6359 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6360 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6362 neon_store_reg64(cpu_V0, rd + pass);
6363 } else { /* size < 3 */
6364 /* Operands in T0 and T1. */
6365 tmp = neon_load_reg(rm, pass);
6366 tmp2 = tcg_temp_new_i32();
6367 tcg_gen_movi_i32(tmp2, imm);
6371 GEN_NEON_INTEGER_OP(shl);
6375 GEN_NEON_INTEGER_OP(rshl);
6378 case 5: /* VSHL, VSLI */
6380 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6381 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6382 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
6386 case 6: /* VQSHLU */
6389 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6393 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6397 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6405 GEN_NEON_INTEGER_OP_ENV(qshl);
6408 tcg_temp_free_i32(tmp2);
6410 if (op == 1 || op == 3) {
6412 tmp2 = neon_load_reg(rd, pass);
6413 gen_neon_add(size, tmp, tmp2);
6414 tcg_temp_free_i32(tmp2);
6415 } else if (op == 4 || (op == 5 && u)) {
6420 mask = 0xff >> -shift;
6422 mask = (uint8_t)(0xff << shift);
6428 mask = 0xffff >> -shift;
6430 mask = (uint16_t)(0xffff << shift);
6434 if (shift < -31 || shift > 31) {
6438 mask = 0xffffffffu >> -shift;
6440 mask = 0xffffffffu << shift;
6446 tmp2 = neon_load_reg(rd, pass);
6447 tcg_gen_andi_i32(tmp, tmp, mask);
6448 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
6449 tcg_gen_or_i32(tmp, tmp, tmp2);
6450 tcg_temp_free_i32(tmp2);
6452 neon_store_reg(rd, pass, tmp);
6455 } else if (op < 10) {
6456 /* Shift by immediate and narrow:
6457 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6458 int input_unsigned = (op == 8) ? !u : u;
6462 shift = shift - (1 << (size + 3));
6465 tmp64 = tcg_const_i64(shift);
6466 neon_load_reg64(cpu_V0, rm);
6467 neon_load_reg64(cpu_V1, rm + 1);
6468 for (pass = 0; pass < 2; pass++) {
6476 if (input_unsigned) {
6477 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6479 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6482 if (input_unsigned) {
6483 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6485 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6488 tmp = tcg_temp_new_i32();
6489 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6490 neon_store_reg(rd, pass, tmp);
6492 tcg_temp_free_i64(tmp64);
6495 imm = (uint16_t)shift;
6499 imm = (uint32_t)shift;
6501 tmp2 = tcg_const_i32(imm);
6502 tmp4 = neon_load_reg(rm + 1, 0);
6503 tmp5 = neon_load_reg(rm + 1, 1);
6504 for (pass = 0; pass < 2; pass++) {
6506 tmp = neon_load_reg(rm, 0);
6510 gen_neon_shift_narrow(size, tmp, tmp2, q,
6513 tmp3 = neon_load_reg(rm, 1);
6517 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6519 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6520 tcg_temp_free_i32(tmp);
6521 tcg_temp_free_i32(tmp3);
6522 tmp = tcg_temp_new_i32();
6523 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6524 neon_store_reg(rd, pass, tmp);
6526 tcg_temp_free_i32(tmp2);
6528 } else if (op == 10) {
6530 if (q || (rd & 1)) {
6533 tmp = neon_load_reg(rm, 0);
6534 tmp2 = neon_load_reg(rm, 1);
6535 for (pass = 0; pass < 2; pass++) {
6539 gen_neon_widen(cpu_V0, tmp, size, u);
6542 /* The shift is less than the width of the source
6543 type, so we can just shift the whole register. */
6544 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
6545 /* Widen the result of shift: we need to clear
6546 * the potential overflow bits resulting from
6547 * left bits of the narrow input appearing as
6548 * right bits of the left neighbour narrow input. */
6550 if (size < 2 || !u) {
6553 imm = (0xffu >> (8 - shift));
6555 } else if (size == 1) {
6556 imm = 0xffff >> (16 - shift);
6559 imm = 0xffffffff >> (32 - shift);
6562 imm64 = imm | (((uint64_t)imm) << 32);
6566 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
6569 neon_store_reg64(cpu_V0, rd + pass);
6571 } else if (op >= 14) {
6572 /* VCVT fixed-point. */
6573 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6576 /* We have already masked out the must-be-1 top bit of imm6,
6577 * hence this 32-shift where the ARM ARM has 64-imm6.
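* (Worked example, added for illustration: an imm6 of 48 means
* 64 - 48 = 16 fraction bits; with the must-be-1 top bit masked off the
* field holds 16, and 32 - 16 yields the same 16, so the two
* formulations agree.)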
6580 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6581 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
6584 gen_vfp_ulto(0, shift, 1);
6586 gen_vfp_slto(0, shift, 1);
6589 gen_vfp_toul(0, shift, 1);
6591 gen_vfp_tosl(0, shift, 1);
6593 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6598 } else { /* (insn & 0x00380080) == 0 */
6600 if (q && (rd & 1)) {
6604 op = (insn >> 8) & 0xf;
6605 /* One register and immediate. */
6606 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
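/* Decode note, added for clarity: this assembles the 8-bit "abcdefgh"
 * immediate from insn bit 24 (a, already held in u), bits [18:16] (bcd)
 * and bits [3:0] (efgh); the cmode/op handling below then expands it into
 * the per-pass constant that is ORed, ANDed or moved into the register.
 */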
6607 invert = (insn & (1 << 5)) != 0;
6608 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6609 * We choose to not special-case this and will behave as if a
6610 * valid constant encoding of 0 had been given.
6629 imm = (imm << 8) | (imm << 24);
6632 imm = (imm << 8) | 0xff;
6635 imm = (imm << 16) | 0xffff;
6638 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6646 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6647 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6653 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6654 if (op & 1 && op < 12) {
6655 tmp = neon_load_reg(rd, pass);
6657 /* The immediate value has already been inverted, so BIC becomes AND. */
6659 tcg_gen_andi_i32(tmp, tmp, imm);
6661 tcg_gen_ori_i32(tmp, tmp, imm);
6665 tmp = tcg_temp_new_i32();
6666 if (op == 14 && invert) {
6670 for (n = 0; n < 4; n++) {
6671 if (imm & (1 << (n + (pass & 1) * 4)))
6672 val |= 0xff << (n * 8);
6674 tcg_gen_movi_i32(tmp, val);
6676 tcg_gen_movi_i32(tmp, imm);
6679 neon_store_reg(rd, pass, tmp);
6682 } else { /* (insn & 0x00800010 == 0x00800000) */
6684 op = (insn >> 8) & 0xf;
6685 if ((insn & (1 << 6)) == 0) {
6686 /* Three registers of different lengths. */
6690 /* undefreq: bit 0 : UNDEF if size == 0
6691 * bit 1 : UNDEF if size == 1
6692 * bit 2 : UNDEF if size == 2
6693 * bit 3 : UNDEF if U == 1
6694 * Note that [2:0] set implies 'always UNDEF'
6697 /* prewiden, src1_wide, src2_wide, undefreq */
6698 static const int neon_3reg_wide[16][4] = {
6699 {1, 0, 0, 0}, /* VADDL */
6700 {1, 1, 0, 0}, /* VADDW */
6701 {1, 0, 0, 0}, /* VSUBL */
6702 {1, 1, 0, 0}, /* VSUBW */
6703 {0, 1, 1, 0}, /* VADDHN */
6704 {0, 0, 0, 0}, /* VABAL */
6705 {0, 1, 1, 0}, /* VSUBHN */
6706 {0, 0, 0, 0}, /* VABDL */
6707 {0, 0, 0, 0}, /* VMLAL */
6708 {0, 0, 0, 9}, /* VQDMLAL */
6709 {0, 0, 0, 0}, /* VMLSL */
6710 {0, 0, 0, 9}, /* VQDMLSL */
6711 {0, 0, 0, 0}, /* Integer VMULL */
6712 {0, 0, 0, 1}, /* VQDMULL */
6713 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6714 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6717 prewiden = neon_3reg_wide[op][0];
6718 src1_wide = neon_3reg_wide[op][1];
6719 src2_wide = neon_3reg_wide[op][2];
6720 undefreq = neon_3reg_wide[op][3];
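/* Example, added for illustration: VQDMLAL has undefreq 9, i.e.
 * (1 << 0) | (1 << 3), so the check below UNDEFs it both for size == 0 and
 * for U == 1; the reserved encoding uses 7 and so UNDEFs for every size.
 */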
6722 if ((undefreq & (1 << size)) ||
6723 ((undefreq & 8) && u)) {
6726 if ((src1_wide && (rn & 1)) ||
6727 (src2_wide && (rm & 1)) ||
6728 (!src2_wide && (rd & 1))) {
6732 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6733 * outside the loop below as it only performs a single pass.
6735 if (op == 14 && size == 2) {
6736 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6738 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6741 tcg_rn = tcg_temp_new_i64();
6742 tcg_rm = tcg_temp_new_i64();
6743 tcg_rd = tcg_temp_new_i64();
6744 neon_load_reg64(tcg_rn, rn);
6745 neon_load_reg64(tcg_rm, rm);
6746 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6747 neon_store_reg64(tcg_rd, rd);
6748 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6749 neon_store_reg64(tcg_rd, rd + 1);
6750 tcg_temp_free_i64(tcg_rn);
6751 tcg_temp_free_i64(tcg_rm);
6752 tcg_temp_free_i64(tcg_rd);
6756 /* Avoid overlapping operands. Wide source operands are
6757 always aligned so will never overlap with wide
6758 destinations in problematic ways. */
6759 if (rd == rm && !src2_wide) {
6760 tmp = neon_load_reg(rm, 1);
6761 neon_store_scratch(2, tmp);
6762 } else if (rd == rn && !src1_wide) {
6763 tmp = neon_load_reg(rn, 1);
6764 neon_store_scratch(2, tmp);
6767 for (pass = 0; pass < 2; pass++) {
6769 neon_load_reg64(cpu_V0, rn + pass);
6772 if (pass == 1 && rd == rn) {
6773 tmp = neon_load_scratch(2);
6775 tmp = neon_load_reg(rn, pass);
6778 gen_neon_widen(cpu_V0, tmp, size, u);
6782 neon_load_reg64(cpu_V1, rm + pass);
6785 if (pass == 1 && rd == rm) {
6786 tmp2 = neon_load_scratch(2);
6788 tmp2 = neon_load_reg(rm, pass);
6791 gen_neon_widen(cpu_V1, tmp2, size, u);
6795 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6796 gen_neon_addl(size);
6798 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6799 gen_neon_subl(size);
6801 case 5: case 7: /* VABAL, VABDL */
6802 switch ((size << 1) | u) {
6804 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6807 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6810 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6813 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6816 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6819 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6823 tcg_temp_free_i32(tmp2);
6824 tcg_temp_free_i32(tmp);
6826 case 8: case 9: case 10: case 11: case 12: case 13:
6827 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6828 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6830 case 14: /* Polynomial VMULL */
6831 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6832 tcg_temp_free_i32(tmp2);
6833 tcg_temp_free_i32(tmp);
6835 default: /* 15 is RESERVED: caught earlier */
6840 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6841 neon_store_reg64(cpu_V0, rd + pass);
6842 } else if (op == 5 || (op >= 8 && op <= 11)) {
6844 neon_load_reg64(cpu_V1, rd + pass);
6846 case 10: /* VMLSL */
6847 gen_neon_negl(cpu_V0, size);
6849 case 5: case 8: /* VABAL, VMLAL */
6850 gen_neon_addl(size);
6852 case 9: case 11: /* VQDMLAL, VQDMLSL */
6853 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6855 gen_neon_negl(cpu_V0, size);
6857 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6862 neon_store_reg64(cpu_V0, rd + pass);
6863 } else if (op == 4 || op == 6) {
6864 /* Narrowing operation. */
6865 tmp = tcg_temp_new_i32();
6869 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6872 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6875 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6876 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6883 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6886 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6889 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6890 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6891 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6899 neon_store_reg(rd, 0, tmp3);
6900 neon_store_reg(rd, 1, tmp);
6903 /* Write back the result. */
6904 neon_store_reg64(cpu_V0, rd + pass);
6908 /* Two registers and a scalar. NB that for ops of this form
6909 * the ARM ARM labels bit 24 as Q, but it is in our variable 'u', not 'q'. */
6916 case 1: /* Float VMLA scalar */
6917 case 5: /* Float VMLS scalar */
6918 case 9: /* Float VMUL scalar */
6923 case 0: /* Integer VMLA scalar */
6924 case 4: /* Integer VMLS scalar */
6925 case 8: /* Integer VMUL scalar */
6926 case 12: /* VQDMULH scalar */
6927 case 13: /* VQRDMULH scalar */
6928 if (u && ((rd | rn) & 1)) {
6931 tmp = neon_get_scalar(size, rm);
6932 neon_store_scratch(0, tmp);
6933 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6934 tmp = neon_load_scratch(0);
6935 tmp2 = neon_load_reg(rn, pass);
6938 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6940 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6942 } else if (op == 13) {
6944 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6946 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6948 } else if (op & 1) {
6949 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6950 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6951 tcg_temp_free_ptr(fpstatus);
6954 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6955 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6956 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6960 tcg_temp_free_i32(tmp2);
6963 tmp2 = neon_load_reg(rd, pass);
6966 gen_neon_add(size, tmp, tmp2);
6970 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6971 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6972 tcg_temp_free_ptr(fpstatus);
6976 gen_neon_rsb(size, tmp, tmp2);
6980 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6981 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6982 tcg_temp_free_ptr(fpstatus);
6988 tcg_temp_free_i32(tmp2);
6990 neon_store_reg(rd, pass, tmp);
6993 case 3: /* VQDMLAL scalar */
6994 case 7: /* VQDMLSL scalar */
6995 case 11: /* VQDMULL scalar */
7000 case 2: /* VMLAL scalar */
7001 case 6: /* VMLSL scalar */
7002 case 10: /* VMULL scalar */
7006 tmp2 = neon_get_scalar(size, rm);
7007 /* We need a copy of tmp2 because gen_neon_mull
7008 * frees it during pass 0. */
7009 tmp4 = tcg_temp_new_i32();
7010 tcg_gen_mov_i32(tmp4, tmp2);
7011 tmp3 = neon_load_reg(rn, 1);
7013 for (pass = 0; pass < 2; pass++) {
7015 tmp = neon_load_reg(rn, 0);
7020 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
7022 neon_load_reg64(cpu_V1, rd + pass);
7026 gen_neon_negl(cpu_V0, size);
7029 gen_neon_addl(size);
7032 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7034 gen_neon_negl(cpu_V0, size);
7036 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7042 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7047 neon_store_reg64(cpu_V0, rd + pass);
7050 case 14: /* VQRDMLAH scalar */
7051 case 15: /* VQRDMLSH scalar */
7053 NeonGenThreeOpEnvFn *fn;
7055 if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
7058 if (u && ((rd | rn) & 1)) {
7063 fn = gen_helper_neon_qrdmlah_s16;
7065 fn = gen_helper_neon_qrdmlah_s32;
7069 fn = gen_helper_neon_qrdmlsh_s16;
7071 fn = gen_helper_neon_qrdmlsh_s32;
7075 tmp2 = neon_get_scalar(size, rm);
7076 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7077 tmp = neon_load_reg(rn, pass);
7078 tmp3 = neon_load_reg(rd, pass);
7079 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7080 tcg_temp_free_i32(tmp3);
7081 neon_store_reg(rd, pass, tmp);
7083 tcg_temp_free_i32(tmp2);
7087 g_assert_not_reached();
7090 } else { /* size == 3 */
7093 imm = (insn >> 8) & 0xf;
7098 if (q && ((rd | rn | rm) & 1)) {
7103 neon_load_reg64(cpu_V0, rn);
7105 neon_load_reg64(cpu_V1, rn + 1);
7107 } else if (imm == 8) {
7108 neon_load_reg64(cpu_V0, rn + 1);
7110 neon_load_reg64(cpu_V1, rm);
7113 tmp64 = tcg_temp_new_i64();
7115 neon_load_reg64(cpu_V0, rn);
7116 neon_load_reg64(tmp64, rn + 1);
7118 neon_load_reg64(cpu_V0, rn + 1);
7119 neon_load_reg64(tmp64, rm);
7121 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
7122 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
7123 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
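/* Worked example, added for illustration: for VEXT with (imm & 7) == 3 the
 * low result doubleword takes the top 40 bits of the first source
 * doubleword (shifted right by 24) ORed with the low 24 bits of the next
 * doubleword shifted left by 40, i.e. a byte-granular extract starting at
 * byte 3 of the concatenated source.
 */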
7125 neon_load_reg64(cpu_V1, rm);
7127 neon_load_reg64(cpu_V1, rm + 1);
7130 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7131 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7132 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
7133 tcg_temp_free_i64(tmp64);
7136 neon_load_reg64(cpu_V0, rn);
7137 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
7138 neon_load_reg64(cpu_V1, rm);
7139 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7140 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7142 neon_store_reg64(cpu_V0, rd);
7144 neon_store_reg64(cpu_V1, rd + 1);
7146 } else if ((insn & (1 << 11)) == 0) {
7147 /* Two register misc. */
7148 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7149 size = (insn >> 18) & 3;
7150 /* UNDEF for unknown op values and bad op-size combinations */
7151 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7154 if (neon_2rm_is_v8_op(op) &&
7155 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7158 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7159 q && ((rm | rd) & 1)) {
7163 case NEON_2RM_VREV64:
7164 for (pass = 0; pass < (q ? 2 : 1); pass++) {
7165 tmp = neon_load_reg(rm, pass * 2);
7166 tmp2 = neon_load_reg(rm, pass * 2 + 1);
7168 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7169 case 1: gen_swap_half(tmp); break;
7170 case 2: /* no-op */ break;
7173 neon_store_reg(rd, pass * 2 + 1, tmp);
7175 neon_store_reg(rd, pass * 2, tmp2);
7178 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7179 case 1: gen_swap_half(tmp2); break;
7182 neon_store_reg(rd, pass * 2, tmp2);
7186 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7187 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
7188 for (pass = 0; pass < q + 1; pass++) {
7189 tmp = neon_load_reg(rm, pass * 2);
7190 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7191 tmp = neon_load_reg(rm, pass * 2 + 1);
7192 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7194 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7195 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7196 case 2: tcg_gen_add_i64(CPU_V001); break;
7199 if (op >= NEON_2RM_VPADAL) {
7201 neon_load_reg64(cpu_V1, rd + pass);
7202 gen_neon_addl(size);
7204 neon_store_reg64(cpu_V0, rd + pass);
7210 for (n = 0; n < (q ? 4 : 2); n += 2) {
7211 tmp = neon_load_reg(rm, n);
7212 tmp2 = neon_load_reg(rd, n + 1);
7213 neon_store_reg(rm, n, tmp2);
7214 neon_store_reg(rd, n + 1, tmp);
7221 if (gen_neon_unzip(rd, rm, size, q)) {
7226 if (gen_neon_zip(rd, rm, size, q)) {
7230 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7231 /* also VQMOVUN; op field and mnemonics don't line up */
7236 for (pass = 0; pass < 2; pass++) {
7237 neon_load_reg64(cpu_V0, rm + pass);
7238 tmp = tcg_temp_new_i32();
7239 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7244 neon_store_reg(rd, 0, tmp2);
7245 neon_store_reg(rd, 1, tmp);
7249 case NEON_2RM_VSHLL:
7250 if (q || (rd & 1)) {
7253 tmp = neon_load_reg(rm, 0);
7254 tmp2 = neon_load_reg(rm, 1);
7255 for (pass = 0; pass < 2; pass++) {
7258 gen_neon_widen(cpu_V0, tmp, size, 1);
7259 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
7260 neon_store_reg64(cpu_V0, rd + pass);
7263 case NEON_2RM_VCVT_F16_F32:
7268 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7272 tmp = tcg_temp_new_i32();
7273 tmp2 = tcg_temp_new_i32();
7274 fpst = get_fpstatus_ptr(true);
7275 ahp = get_ahp_flag();
7276 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
7277 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7278 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
7279 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7280 tcg_gen_shli_i32(tmp2, tmp2, 16);
7281 tcg_gen_or_i32(tmp2, tmp2, tmp);
7282 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
7283 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7284 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7285 neon_store_reg(rd, 0, tmp2);
7286 tmp2 = tcg_temp_new_i32();
7287 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7288 tcg_gen_shli_i32(tmp2, tmp2, 16);
7289 tcg_gen_or_i32(tmp2, tmp2, tmp);
7290 neon_store_reg(rd, 1, tmp2);
7291 tcg_temp_free_i32(tmp);
7292 tcg_temp_free_i32(ahp);
7293 tcg_temp_free_ptr(fpst);
7296 case NEON_2RM_VCVT_F32_F16:
7300 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7304 fpst = get_fpstatus_ptr(true);
7305 ahp = get_ahp_flag();
7306 tmp3 = tcg_temp_new_i32();
7307 tmp = neon_load_reg(rm, 0);
7308 tmp2 = neon_load_reg(rm, 1);
7309 tcg_gen_ext16u_i32(tmp3, tmp);
7310 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7311 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7312 tcg_gen_shri_i32(tmp3, tmp, 16);
7313 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7314 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7315 tcg_temp_free_i32(tmp);
7316 tcg_gen_ext16u_i32(tmp3, tmp2);
7317 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7318 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7319 tcg_gen_shri_i32(tmp3, tmp2, 16);
7320 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7321 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7322 tcg_temp_free_i32(tmp2);
7323 tcg_temp_free_i32(tmp3);
7324 tcg_temp_free_i32(ahp);
7325 tcg_temp_free_ptr(fpst);
7328 case NEON_2RM_AESE: case NEON_2RM_AESMC:
7329 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
7330 || ((rm | rd) & 1)) {
7333 ptr1 = vfp_reg_ptr(true, rd);
7334 ptr2 = vfp_reg_ptr(true, rm);
7336 /* Bit 6 is the lowest opcode bit; it distinguishes between
7337 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7339 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7341 if (op == NEON_2RM_AESE) {
7342 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
7344 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
7346 tcg_temp_free_ptr(ptr1);
7347 tcg_temp_free_ptr(ptr2);
7348 tcg_temp_free_i32(tmp3);
7350 case NEON_2RM_SHA1H:
7351 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
7352 || ((rm | rd) & 1)) {
7355 ptr1 = vfp_reg_ptr(true, rd);
7356 ptr2 = vfp_reg_ptr(true, rm);
7358 gen_helper_crypto_sha1h(ptr1, ptr2);
7360 tcg_temp_free_ptr(ptr1);
7361 tcg_temp_free_ptr(ptr2);
7363 case NEON_2RM_SHA1SU1:
7364 if ((rm | rd) & 1) {
7367 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7369 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
7372 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
7375 ptr1 = vfp_reg_ptr(true, rd);
7376 ptr2 = vfp_reg_ptr(true, rm);
7378 gen_helper_crypto_sha256su0(ptr1, ptr2);
7380 gen_helper_crypto_sha1su1(ptr1, ptr2);
7382 tcg_temp_free_ptr(ptr1);
7383 tcg_temp_free_ptr(ptr2);
7387 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7388 if (neon_2rm_is_float_op(op)) {
7389 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7390 neon_reg_offset(rm, pass));
7393 tmp = neon_load_reg(rm, pass);
7396 case NEON_2RM_VREV32:
7398 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7399 case 1: gen_swap_half(tmp); break;
7403 case NEON_2RM_VREV16:
7408 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7409 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7410 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7416 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7417 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7418 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
7423 gen_helper_neon_cnt_u8(tmp, tmp);
7426 tcg_gen_not_i32(tmp, tmp);
7428 case NEON_2RM_VQABS:
7431 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7434 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7437 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7442 case NEON_2RM_VQNEG:
7445 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7448 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7451 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7456 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7457 tmp2 = tcg_const_i32(0);
7459 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7460 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7461 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7464 tcg_temp_free_i32(tmp2);
7465 if (op == NEON_2RM_VCLE0) {
7466 tcg_gen_not_i32(tmp, tmp);
7469 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7470 tmp2 = tcg_const_i32(0);
7472 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7473 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7474 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7477 tcg_temp_free_i32(tmp2);
7478 if (op == NEON_2RM_VCLT0) {
7479 tcg_gen_not_i32(tmp, tmp);
7482 case NEON_2RM_VCEQ0:
7483 tmp2 = tcg_const_i32(0);
7485 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7486 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7487 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7490 tcg_temp_free_i32(tmp2);
7494 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7495 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7496 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7501 tmp2 = tcg_const_i32(0);
7502 gen_neon_rsb(size, tmp, tmp2);
7503 tcg_temp_free_i32(tmp2);
7505 case NEON_2RM_VCGT0_F:
7507 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7508 tmp2 = tcg_const_i32(0);
7509 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7510 tcg_temp_free_i32(tmp2);
7511 tcg_temp_free_ptr(fpstatus);
7514 case NEON_2RM_VCGE0_F:
7516 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7517 tmp2 = tcg_const_i32(0);
7518 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7519 tcg_temp_free_i32(tmp2);
7520 tcg_temp_free_ptr(fpstatus);
7523 case NEON_2RM_VCEQ0_F:
7525 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7526 tmp2 = tcg_const_i32(0);
7527 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
7528 tcg_temp_free_i32(tmp2);
7529 tcg_temp_free_ptr(fpstatus);
7532 case NEON_2RM_VCLE0_F:
7534 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7535 tmp2 = tcg_const_i32(0);
7536 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
7537 tcg_temp_free_i32(tmp2);
7538 tcg_temp_free_ptr(fpstatus);
7541 case NEON_2RM_VCLT0_F:
7543 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7544 tmp2 = tcg_const_i32(0);
7545 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
7546 tcg_temp_free_i32(tmp2);
7547 tcg_temp_free_ptr(fpstatus);
7550 case NEON_2RM_VABS_F:
7553 case NEON_2RM_VNEG_F:
7557 tmp2 = neon_load_reg(rd, pass);
7558 neon_store_reg(rm, pass, tmp2);
7561 tmp2 = neon_load_reg(rd, pass);
7563 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7564 case 1: gen_neon_trn_u16(tmp, tmp2); break;
7567 neon_store_reg(rm, pass, tmp2);
7569 case NEON_2RM_VRINTN:
7570 case NEON_2RM_VRINTA:
7571 case NEON_2RM_VRINTM:
7572 case NEON_2RM_VRINTP:
7573 case NEON_2RM_VRINTZ:
7576 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7579 if (op == NEON_2RM_VRINTZ) {
7580 rmode = FPROUNDING_ZERO;
7582 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7585 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7586 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7588 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7589 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7591 tcg_temp_free_ptr(fpstatus);
7592 tcg_temp_free_i32(tcg_rmode);
7595 case NEON_2RM_VRINTX:
7597 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7598 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7599 tcg_temp_free_ptr(fpstatus);
7602 case NEON_2RM_VCVTAU:
7603 case NEON_2RM_VCVTAS:
7604 case NEON_2RM_VCVTNU:
7605 case NEON_2RM_VCVTNS:
7606 case NEON_2RM_VCVTPU:
7607 case NEON_2RM_VCVTPS:
7608 case NEON_2RM_VCVTMU:
7609 case NEON_2RM_VCVTMS:
7611 bool is_signed = !extract32(insn, 7, 1);
7612 TCGv_ptr fpst = get_fpstatus_ptr(1);
7613 TCGv_i32 tcg_rmode, tcg_shift;
7614 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7616 tcg_shift = tcg_const_i32(0);
7617 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7618 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7622 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7625 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7629 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7631 tcg_temp_free_i32(tcg_rmode);
7632 tcg_temp_free_i32(tcg_shift);
7633 tcg_temp_free_ptr(fpst);
7636 case NEON_2RM_VRECPE:
7638 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7639 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7640 tcg_temp_free_ptr(fpstatus);
7643 case NEON_2RM_VRSQRTE:
7645 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7646 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7647 tcg_temp_free_ptr(fpstatus);
7650 case NEON_2RM_VRECPE_F:
7652 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7653 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7654 tcg_temp_free_ptr(fpstatus);
7657 case NEON_2RM_VRSQRTE_F:
7659 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7660 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7661 tcg_temp_free_ptr(fpstatus);
7664 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7667 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7670 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7671 gen_vfp_tosiz(0, 1);
7673 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7674 gen_vfp_touiz(0, 1);
7677 /* Reserved op values were caught by the
7678 * neon_2rm_sizes[] check earlier.
7682 if (neon_2rm_is_float_op(op)) {
7683 tcg_gen_st_f32(cpu_F0s, cpu_env,
7684 neon_reg_offset(rd, pass));
7686 neon_store_reg(rd, pass, tmp);
7691 } else if ((insn & (1 << 10)) == 0) {
7693 int n = ((insn >> 8) & 3) + 1;
7694 if ((rn + n) > 32) {
7695 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7696 * helper function running off the end of the register file.
7701 if (insn & (1 << 6)) {
7702 tmp = neon_load_reg(rd, 0);
7704 tmp = tcg_temp_new_i32();
7705 tcg_gen_movi_i32(tmp, 0);
7707 tmp2 = neon_load_reg(rm, 0);
7708 ptr1 = vfp_reg_ptr(true, rn);
7709 tmp5 = tcg_const_i32(n);
7710 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7711 tcg_temp_free_i32(tmp);
7712 if (insn & (1 << 6)) {
7713 tmp = neon_load_reg(rd, 1);
7715 tmp = tcg_temp_new_i32();
7716 tcg_gen_movi_i32(tmp, 0);
7718 tmp3 = neon_load_reg(rm, 1);
7719 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
7720 tcg_temp_free_i32(tmp5);
7721 tcg_temp_free_ptr(ptr1);
7722 neon_store_reg(rd, 0, tmp2);
7723 neon_store_reg(rd, 1, tmp3);
7724 tcg_temp_free_i32(tmp);
7725 } else if ((insn & 0x380) == 0) {
7727 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7730 if (insn & (1 << 19)) {
7731 tmp = neon_load_reg(rm, 1);
7733 tmp = neon_load_reg(rm, 0);
7735 if (insn & (1 << 16)) {
7736 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7737 } else if (insn & (1 << 17)) {
7738 if ((insn >> 18) & 1)
7739 gen_neon_dup_high16(tmp);
7741 gen_neon_dup_low16(tmp);
7743 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7744 tmp2 = tcg_temp_new_i32();
7745 tcg_gen_mov_i32(tmp2, tmp);
7746 neon_store_reg(rd, pass, tmp2);
7748 tcg_temp_free_i32(tmp);
7757 /* Advanced SIMD three registers of the same length extension.
7758 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7759 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7760 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7761 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7763 static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7765 gen_helper_gvec_3 *fn_gvec = NULL;
7766 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7767 int rd, rn, rm, opr_sz;
7771 q = extract32(insn, 6, 1);
7772 VFP_DREG_D(rd, insn);
7773 VFP_DREG_N(rn, insn);
7774 VFP_DREG_M(rm, insn);
7775 if ((rd | rn | rm) & q) {
7779 if ((insn & 0xfe200f10) == 0xfc200800) {
7780 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
7781 int size = extract32(insn, 20, 1);
7782 data = extract32(insn, 23, 2); /* rot */
7783 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7784 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7787 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7788 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7789 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
7790 int size = extract32(insn, 20, 1);
7791 data = extract32(insn, 24, 1); /* rot */
7792 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7793 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7796 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
7797 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
7798 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
7799 bool u = extract32(insn, 4, 1);
7800 if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
7803 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
7808 if (s->fp_excp_el) {
7809 gen_exception_insn(s, 4, EXCP_UDEF,
7810 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7813 if (!s->vfp_enabled) {
7817 opr_sz = (1 + q) * 8;
7819 TCGv_ptr fpst = get_fpstatus_ptr(1);
7820 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7821 vfp_reg_offset(1, rn),
7822 vfp_reg_offset(1, rm), fpst,
7823 opr_sz, opr_sz, data, fn_gvec_ptr);
7824 tcg_temp_free_ptr(fpst);
7826 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7827 vfp_reg_offset(1, rn),
7828 vfp_reg_offset(1, rm),
7829 opr_sz, opr_sz, data, fn_gvec);
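/* Note that opr_sz is the vector length in bytes: 8 for a D-register
 * (Q == 0) operation and 16 for a Q-register one. Helpers that need the
 * floating-point status (VCMLA, VCADD) go through the gvec_3_ptr path
 * with fpst; the integer dot-product helpers use gvec_3_ool.
 */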
7834 /* Advanced SIMD two registers and a scalar extension.
7835 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7836 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7837 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7838 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7842 static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7844 gen_helper_gvec_3 *fn_gvec = NULL;
7845 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7846 int rd, rn, rm, opr_sz, data;
7849 q = extract32(insn, 6, 1);
7850 VFP_DREG_D(rd, insn);
7851 VFP_DREG_N(rn, insn);
7852 if ((rd | rn) & q) {
7856 if ((insn & 0xff000f10) == 0xfe000800) {
7857 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
7858 int rot = extract32(insn, 20, 2);
7859 int size = extract32(insn, 23, 1);
7862 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
7866 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7869 /* For fp16, rm is just Vm, and index is M. */
7870 rm = extract32(insn, 0, 4);
7871 index = extract32(insn, 5, 1);
7873 /* For fp32, rm is the usual M:Vm, and index is 0. */
7874 VFP_DREG_M(rm, insn);
7877 data = (index << 2) | rot;
7878 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7879 : gen_helper_gvec_fcmlah_idx);
7880 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7881 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7882 int u = extract32(insn, 4, 1);
7883 if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
7886 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7887 /* rm is just Vm, and index is M. */
7888 data = extract32(insn, 5, 1); /* index */
7889 rm = extract32(insn, 0, 4);
7894 if (s->fp_excp_el) {
7895 gen_exception_insn(s, 4, EXCP_UDEF,
7896 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7899 if (!s->vfp_enabled) {
7903 opr_sz = (1 + q) * 8;
7905 TCGv_ptr fpst = get_fpstatus_ptr(1);
7906 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7907 vfp_reg_offset(1, rn),
7908 vfp_reg_offset(1, rm), fpst,
7909 opr_sz, opr_sz, data, fn_gvec_ptr);
7910 tcg_temp_free_ptr(fpst);
7912 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
7913 vfp_reg_offset(1, rn),
7914 vfp_reg_offset(1, rm),
7915 opr_sz, opr_sz, data, fn_gvec);
7920 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7922 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7923 const ARMCPRegInfo *ri;
7925 cpnum = (insn >> 8) & 0xf;
7927 /* First check for coprocessor space used for XScale/iwMMXt insns */
7928 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7929 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7932 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7933 return disas_iwmmxt_insn(s, insn);
7934 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7935 return disas_dsp_insn(s, insn);
7940 /* Otherwise treat as a generic register access */
7941 is64 = (insn & (1 << 25)) == 0;
7942 if (!is64 && ((insn & (1 << 4)) == 0)) {
7950 opc1 = (insn >> 4) & 0xf;
7952 rt2 = (insn >> 16) & 0xf;
7954 crn = (insn >> 16) & 0xf;
7955 opc1 = (insn >> 21) & 7;
7956 opc2 = (insn >> 5) & 7;
7959 isread = (insn >> 20) & 1;
7960 rt = (insn >> 12) & 0xf;
7962 ri = get_arm_cp_reginfo(s->cp_regs,
7963 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7965 /* Check access permissions */
7966 if (!cp_access_ok(s->current_el, ri, isread)) {
7971 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7972 /* Emit code to perform further access permissions checks at
7973 * runtime; this may result in an exception.
7974 * Note that on XScale all cp0..c13 registers do an access check
7975 * call in order to handle c15_cpar.
7978 TCGv_i32 tcg_syn, tcg_isread;
7981 /* Note that since we are an implementation which takes an
7982 * exception on a trapped conditional instruction only if the
7983 * instruction passes its condition code check, we can take
7984 * advantage of the clause in the ARM ARM that allows us to set
7985 * the COND field in the syndrome to 0xE in all cases.
7986 * We could fish the actual condition out of the insn (ARM)
7987 * or the condexec bits (Thumb) but it isn't necessary.
7992 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7995 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
8001 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
8004 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
8009 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8010 * so this can only happen if this is an ARMv7 or earlier CPU,
8011 * in which case the syndrome information won't actually be guest visible. */
8014 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8015 syndrome = syn_uncategorized();
8019 gen_set_condexec(s);
8020 gen_set_pc_im(s, s->pc - 4);
8021 tmpptr = tcg_const_ptr(ri);
8022 tcg_syn = tcg_const_i32(syndrome);
8023 tcg_isread = tcg_const_i32(isread);
8024 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8026 tcg_temp_free_ptr(tmpptr);
8027 tcg_temp_free_i32(tcg_syn);
8028 tcg_temp_free_i32(tcg_isread);
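/* The generated code now calls the access-check helper at run time with
 * the register info pointer, the syndrome value chosen above and the
 * read/write flag; the helper raises the appropriate exception if the
 * access is denied, otherwise execution falls through to the actual
 * register read or write below.
 */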
8031 /* Handle special cases first */
8032 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8039 gen_set_pc_im(s, s->pc);
8040 s->base.is_jmp = DISAS_WFI;
8046 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
8055 if (ri->type & ARM_CP_CONST) {
8056 tmp64 = tcg_const_i64(ri->resetvalue);
8057 } else if (ri->readfn) {
8059 tmp64 = tcg_temp_new_i64();
8060 tmpptr = tcg_const_ptr(ri);
8061 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8062 tcg_temp_free_ptr(tmpptr);
8064 tmp64 = tcg_temp_new_i64();
8065 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8067 tmp = tcg_temp_new_i32();
8068 tcg_gen_extrl_i64_i32(tmp, tmp64);
8069 store_reg(s, rt, tmp);
8070 tcg_gen_shri_i64(tmp64, tmp64, 32);
8071 tmp = tcg_temp_new_i32();
8072 tcg_gen_extrl_i64_i32(tmp, tmp64);
8073 tcg_temp_free_i64(tmp64);
8074 store_reg(s, rt2, tmp);
8077 if (ri->type & ARM_CP_CONST) {
8078 tmp = tcg_const_i32(ri->resetvalue);
8079 } else if (ri->readfn) {
8081 tmp = tcg_temp_new_i32();
8082 tmpptr = tcg_const_ptr(ri);
8083 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8084 tcg_temp_free_ptr(tmpptr);
8086 tmp = load_cpu_offset(ri->fieldoffset);
8089 /* Destination register of r15 for 32 bit loads sets
8090 * the condition codes from the high 4 bits of the value
8093 tcg_temp_free_i32(tmp);
8095 store_reg(s, rt, tmp);
8100 if (ri->type & ARM_CP_CONST) {
8101 /* If not forbidden by access permissions, treat as WI */
8106 TCGv_i32 tmplo, tmphi;
8107 TCGv_i64 tmp64 = tcg_temp_new_i64();
8108 tmplo = load_reg(s, rt);
8109 tmphi = load_reg(s, rt2);
8110 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8111 tcg_temp_free_i32(tmplo);
8112 tcg_temp_free_i32(tmphi);
8114 TCGv_ptr tmpptr = tcg_const_ptr(ri);
8115 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8116 tcg_temp_free_ptr(tmpptr);
8118 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8120 tcg_temp_free_i64(tmp64);
8125 tmp = load_reg(s, rt);
8126 tmpptr = tcg_const_ptr(ri);
8127 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8128 tcg_temp_free_ptr(tmpptr);
8129 tcg_temp_free_i32(tmp);
8131 TCGv_i32 tmp = load_reg(s, rt);
8132 store_cpu_offset(tmp, ri->fieldoffset);
8137 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
8138 /* I/O operations must end the TB here (whether read or write) */
8141 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
8142 /* We default to ending the TB on a coprocessor register write,
8143 * but allow this to be suppressed by the register definition
8144 * (usually only necessary to work around guest bugs).
8152 /* Unknown register; this might be a guest error or a QEMU
8153 * unimplemented feature.
8156 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8157 "64 bit system register cp:%d opc1: %d crm:%d "
8159 isread ? "read" : "write", cpnum, opc1, crm,
8160 s->ns ? "non-secure" : "secure");
8162 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8163 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8165 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8166 s->ns ? "non-secure" : "secure");
8173 /* Store a 64-bit value to a register pair. Clobbers val. */
8174 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
8177 tmp = tcg_temp_new_i32();
8178 tcg_gen_extrl_i64_i32(tmp, val);
8179 store_reg(s, rlow, tmp);
8180 tmp = tcg_temp_new_i32();
8181 tcg_gen_shri_i64(val, val, 32);
8182 tcg_gen_extrl_i64_i32(tmp, val);
8183 store_reg(s, rhigh, tmp);
8186 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
8187 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
8192 /* Load value and extend to 64 bits. */
8193 tmp = tcg_temp_new_i64();
8194 tmp2 = load_reg(s, rlow);
8195 tcg_gen_extu_i32_i64(tmp, tmp2);
8196 tcg_temp_free_i32(tmp2);
8197 tcg_gen_add_i64(val, val, tmp);
8198 tcg_temp_free_i64(tmp);
8201 /* load and add a 64-bit value from a register pair. */
8202 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
8208 /* Load 64-bit value rd:rn. */
8209 tmpl = load_reg(s, rlow);
8210 tmph = load_reg(s, rhigh);
8211 tmp = tcg_temp_new_i64();
8212 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
8213 tcg_temp_free_i32(tmpl);
8214 tcg_temp_free_i32(tmph);
8215 tcg_gen_add_i64(val, val, tmp);
8216 tcg_temp_free_i64(tmp);
8219 /* Set N and Z flags from hi|lo. */
8220 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
8222 tcg_gen_mov_i32(cpu_NF, hi);
8223 tcg_gen_or_i32(cpu_ZF, lo, hi);
8226 /* Load/Store exclusive instructions are implemented by remembering
8227 the value/address loaded, and seeing if these are the same
8228 when the store is performed. This should be sufficient to implement
8229 the architecturally mandated semantics, and avoids having to monitor
8230 regular stores. The compare vs the remembered value is done during
8231 the cmpxchg operation, but we must compare the addresses manually. */
8232 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
8233 TCGv_i32 addr, int size)
8235 TCGv_i32 tmp = tcg_temp_new_i32();
8236 TCGMemOp opc = size | MO_ALIGN | s->be_data;
8241 TCGv_i32 tmp2 = tcg_temp_new_i32();
8242 TCGv_i64 t64 = tcg_temp_new_i64();
8244 /* For AArch32, architecturally the 32-bit word at the lowest
8245 * address is always Rt and the one at addr+4 is Rt2, even if
8246 * the CPU is big-endian. That means we don't want to do a
8247 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8248 * for an architecturally 64-bit access, but instead do a
8249 * 64-bit access using MO_BE if appropriate and then split
8251 * This only makes a difference for BE32 user-mode, where
8252 * frob64() must not flip the two halves of the 64-bit data
8253 * but this code must treat BE32 user-mode like BE32 system.
8255 TCGv taddr = gen_aa32_addr(s, addr, opc);
8257 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8258 tcg_temp_free(taddr);
8259 tcg_gen_mov_i64(cpu_exclusive_val, t64);
8260 if (s->be_data == MO_BE) {
8261 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8263 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8265 tcg_temp_free_i64(t64);
8267 store_reg(s, rt2, tmp2);
8269 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
8270 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
8273 store_reg(s, rt, tmp);
8274 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
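/* The load-exclusive now has both the address (cpu_exclusive_addr) and
 * the loaded data (cpu_exclusive_val) recorded; gen_store_exclusive()
 * below checks against both before letting the store succeed.
 */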
8277 static void gen_clrex(DisasContext *s)
8279 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
8282 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
8283 TCGv_i32 addr, int size)
8285 TCGv_i32 t0, t1, t2;
8288 TCGLabel *done_label;
8289 TCGLabel *fail_label;
8290 TCGMemOp opc = size | MO_ALIGN | s->be_data;
8292     /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
              [addr] = {Rt};
              {Rd} = 0;
          } else {
              {Rd} = 1;
          } */
8298 fail_label = gen_new_label();
8299 done_label = gen_new_label();
8300 extaddr = tcg_temp_new_i64();
8301 tcg_gen_extu_i32_i64(extaddr, addr);
8302 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8303 tcg_temp_free_i64(extaddr);
8305 taddr = gen_aa32_addr(s, addr, opc);
8306 t0 = tcg_temp_new_i32();
8307 t1 = load_reg(s, rt);
8309 TCGv_i64 o64 = tcg_temp_new_i64();
8310 TCGv_i64 n64 = tcg_temp_new_i64();
8312 t2 = load_reg(s, rt2);
8313 /* For AArch32, architecturally the 32-bit word at the lowest
8314 * address is always Rt and the one at addr+4 is Rt2, even if
8315 * the CPU is big-endian. Since we're going to treat this as a
8316 * single 64-bit BE store, we need to put the two halves in the
8317 * opposite order for BE to LE, so that they end up in the right
8319 * We don't want gen_aa32_frob64() because that does the wrong
8320 * thing for BE32 usermode.
8322 if (s->be_data == MO_BE) {
8323 tcg_gen_concat_i32_i64(n64, t2, t1);
8325 tcg_gen_concat_i32_i64(n64, t1, t2);
8327 tcg_temp_free_i32(t2);
8329 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8330 get_mem_index(s), opc);
8331 tcg_temp_free_i64(n64);
8333 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8334 tcg_gen_extrl_i64_i32(t0, o64);
8336 tcg_temp_free_i64(o64);
8338 t2 = tcg_temp_new_i32();
8339 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8340 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8341 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8342 tcg_temp_free_i32(t2);
8344 tcg_temp_free_i32(t1);
8345 tcg_temp_free(taddr);
8346 tcg_gen_mov_i32(cpu_R[rd], t0);
8347 tcg_temp_free_i32(t0);
8348 tcg_gen_br(done_label);
8350 gen_set_label(fail_label);
8351 tcg_gen_movi_i32(cpu_R[rd], 1);
8352 gen_set_label(done_label);
8353 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
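/* Whether the store-exclusive succeeded or failed, the monitor is cleared
 * here by writing -1 to cpu_exclusive_addr, matching gen_clrex() above.
 */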
8359 * @mode: mode field from insn (which stack to store to)
8360 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8361 * @writeback: true if writeback bit set
8363 * Generate code for the SRS (Store Return State) insn.
8365 static void gen_srs(DisasContext *s,
8366 uint32_t mode, uint32_t amode, bool writeback)
8373 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
8374 * and specified mode is monitor mode
8375 * - UNDEFINED in Hyp mode
8376 * - UNPREDICTABLE in User or System mode
8377 * - UNPREDICTABLE if the specified mode is:
8378 * -- not implemented
8379 * -- not a valid mode number
8380 * -- a mode that's at a higher exception level
8381 * -- Monitor, if we are Non-secure
8382 * For the UNPREDICTABLE cases we choose to UNDEF.
8384 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
8385 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8389 if (s->current_el == 0 || s->current_el == 2) {
8394 case ARM_CPU_MODE_USR:
8395 case ARM_CPU_MODE_FIQ:
8396 case ARM_CPU_MODE_IRQ:
8397 case ARM_CPU_MODE_SVC:
8398 case ARM_CPU_MODE_ABT:
8399 case ARM_CPU_MODE_UND:
8400 case ARM_CPU_MODE_SYS:
8402 case ARM_CPU_MODE_HYP:
8403 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8407 case ARM_CPU_MODE_MON:
8408 /* No need to check specifically for "are we non-secure" because
8409 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8410 * so if this isn't EL3 then we must be non-secure.
8412 if (s->current_el != 3) {
8421 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8422 default_exception_el(s));
8426 addr = tcg_temp_new_i32();
8427 tmp = tcg_const_i32(mode);
8428 /* get_r13_banked() will raise an exception if called from System mode */
8429 gen_set_condexec(s);
8430 gen_set_pc_im(s, s->pc - 4);
8431 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8432 tcg_temp_free_i32(tmp);
8449 tcg_gen_addi_i32(addr, addr, offset);
8450 tmp = load_reg(s, 14);
8451 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8452 tcg_temp_free_i32(tmp);
8453 tmp = load_cpu_field(spsr);
8454 tcg_gen_addi_i32(addr, addr, 4);
8455 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8456 tcg_temp_free_i32(tmp);
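/* SRS stores the current LR (r14) at the computed address and the current
 * SPSR at address + 4, using the banked r13 of the mode specified in the
 * instruction (fetched via get_r13_banked() above) as the base.
 */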
8474 tcg_gen_addi_i32(addr, addr, offset);
8475 tmp = tcg_const_i32(mode);
8476 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8477 tcg_temp_free_i32(tmp);
8479 tcg_temp_free_i32(addr);
8480 s->base.is_jmp = DISAS_UPDATE;
8483 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8485 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
8492 /* M variants do not implement ARM mode; this must raise the INVSTATE
8493 * UsageFault exception.
8495 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8496 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8497 default_exception_el(s));
8502 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8503 * choose to UNDEF. In ARMv5 and above the space is used
8504 * for miscellaneous unconditional instructions.
8508 /* Unconditional instructions. */
8509 if (((insn >> 25) & 7) == 1) {
8510 /* NEON Data processing. */
8511 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8515 if (disas_neon_data_insn(s, insn)) {
8520 if ((insn & 0x0f100000) == 0x04000000) {
8521 /* NEON load/store. */
8522 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8526 if (disas_neon_ls_insn(s, insn)) {
8531 if ((insn & 0x0f000e10) == 0x0e000a00) {
8533 if (disas_vfp_insn(s, insn)) {
8538 if (((insn & 0x0f30f000) == 0x0510f000) ||
8539 ((insn & 0x0f30f010) == 0x0710f000)) {
8540 if ((insn & (1 << 22)) == 0) {
8542 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8546 /* Otherwise PLD; v5TE+ */
8550 if (((insn & 0x0f70f000) == 0x0450f000) ||
8551 ((insn & 0x0f70f010) == 0x0650f000)) {
8553 return; /* PLI; V7 */
8555 if (((insn & 0x0f700000) == 0x04100000) ||
8556 ((insn & 0x0f700010) == 0x06100000)) {
8557 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8560 return; /* v7MP: Unallocated memory hint: must NOP */
8563 if ((insn & 0x0ffffdff) == 0x01010000) {
8566 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8567 gen_helper_setend(cpu_env);
8568 s->base.is_jmp = DISAS_UPDATE;
8571 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8572 switch ((insn >> 4) & 0xf) {
8580 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8583 /* We need to break the TB after this insn to execute
8584 * self-modifying code correctly and also to take
8585 * any pending interrupts immediately.
8587 gen_goto_tb(s, 0, s->pc & ~1);
8592 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8595 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
8597 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
8603 rn = (insn >> 16) & 0xf;
8604 addr = load_reg(s, rn);
8605 i = (insn >> 23) & 3;
8607 case 0: offset = -4; break; /* DA */
8608 case 1: offset = 0; break; /* IA */
8609 case 2: offset = -8; break; /* DB */
8610 case 3: offset = 4; break; /* IB */
8614 tcg_gen_addi_i32(addr, addr, offset);
8615 /* Load PC into tmp and CPSR into tmp2. */
8616 tmp = tcg_temp_new_i32();
8617 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8618 tcg_gen_addi_i32(addr, addr, 4);
8619 tmp2 = tcg_temp_new_i32();
8620 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8621 if (insn & (1 << 21)) {
8622 /* Base writeback. */
8624 case 0: offset = -8; break;
8625 case 1: offset = 4; break;
8626 case 2: offset = -4; break;
8627 case 3: offset = 0; break;
8631 tcg_gen_addi_i32(addr, addr, offset);
8632 store_reg(s, rn, addr);
8634 tcg_temp_free_i32(addr);
8636 gen_rfe(s, tmp, tmp2);
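/* RFE: the two loads above fetch the return PC and the saved CPSR from
 * consecutive words, with the DA/IA/DB/IB offset selecting which pair of
 * words relative to the base is used; gen_rfe() then performs the
 * exception-return style write to PC and CPSR.
 */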
8638 } else if ((insn & 0x0e000000) == 0x0a000000) {
8639 /* branch link and change to thumb (blx <offset>) */
8642 val = (uint32_t)s->pc;
8643 tmp = tcg_temp_new_i32();
8644 tcg_gen_movi_i32(tmp, val);
8645 store_reg(s, 14, tmp);
8646 /* Sign-extend the 24-bit offset */
8647 offset = (((int32_t)insn) << 8) >> 8;
8648 /* offset * 4 + bit24 * 2 + (thumb bit) */
8649 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8650 /* pipeline offset */
8652 /* protected by ARCH(5); above, near the start of uncond block */
8655 } else if ((insn & 0x0e000f00) == 0x0c000100) {
8656 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8657 /* iWMMXt register transfer. */
8658 if (extract32(s->c15_cpar, 1, 1)) {
8659 if (!disas_iwmmxt_insn(s, insn)) {
8664 } else if ((insn & 0x0e000a00) == 0x0c000800
8665 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8666 if (disas_neon_insn_3same_ext(s, insn)) {
8670 } else if ((insn & 0x0f000a00) == 0x0e000800
8671 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8672 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8676 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8677 /* Coprocessor double register transfer. */
8679 } else if ((insn & 0x0f000010) == 0x0e000010) {
8680 /* Additional coprocessor register transfer. */
8681 } else if ((insn & 0x0ff10020) == 0x01000000) {
8684 /* cps (privileged) */
8688 if (insn & (1 << 19)) {
8689 if (insn & (1 << 8))
8691 if (insn & (1 << 7))
8693 if (insn & (1 << 6))
8695 if (insn & (1 << 18))
8698 if (insn & (1 << 17)) {
8700 val |= (insn & 0x1f);
8703 gen_set_psr_im(s, mask, 0, val);
8710 /* if not always execute, we generate a conditional jump to
8712 s->condlabel = gen_new_label();
8713 arm_gen_test_cc(cond ^ 1, s->condlabel);
8716 if ((insn & 0x0f900000) == 0x03000000) {
8717 if ((insn & (1 << 21)) == 0) {
8719 rd = (insn >> 12) & 0xf;
8720 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8721 if ((insn & (1 << 22)) == 0) {
8723 tmp = tcg_temp_new_i32();
8724 tcg_gen_movi_i32(tmp, val);
8727 tmp = load_reg(s, rd);
8728 tcg_gen_ext16u_i32(tmp, tmp);
8729 tcg_gen_ori_i32(tmp, tmp, val << 16);
8731 store_reg(s, rd, tmp);
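/* MOVW/MOVT: bit 22 selects MOVT, which keeps the low halfword of Rd
 * (the ext16u above) and ORs the 16-bit immediate into the top half;
 * MOVW simply writes the zero-extended immediate to Rd.
 */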
8733 if (((insn >> 12) & 0xf) != 0xf)
8735 if (((insn >> 16) & 0xf) == 0) {
8736 gen_nop_hint(s, insn & 0xff);
8738 /* CPSR = immediate */
8740 shift = ((insn >> 8) & 0xf) * 2;
8742 val = (val >> shift) | (val << (32 - shift));
8743 i = ((insn & (1 << 22)) != 0);
8744 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8750 } else if ((insn & 0x0f900000) == 0x01000000
8751 && (insn & 0x00000090) != 0x00000090) {
8752 /* miscellaneous instructions */
8753 op1 = (insn >> 21) & 3;
8754 sh = (insn >> 4) & 0xf;
8757 case 0x0: /* MSR, MRS */
8758 if (insn & (1 << 9)) {
8759 /* MSR (banked) and MRS (banked) */
8760 int sysm = extract32(insn, 16, 4) |
8761 (extract32(insn, 8, 1) << 4);
8762 int r = extract32(insn, 22, 1);
8766 gen_msr_banked(s, r, sysm, rm);
8769 int rd = extract32(insn, 12, 4);
8771 gen_mrs_banked(s, r, sysm, rd);
8776 /* MSR, MRS (for PSRs) */
8779 tmp = load_reg(s, rm);
8780 i = ((op1 & 2) != 0);
8781 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8785 rd = (insn >> 12) & 0xf;
8789 tmp = load_cpu_field(spsr);
8791 tmp = tcg_temp_new_i32();
8792 gen_helper_cpsr_read(tmp, cpu_env);
8794 store_reg(s, rd, tmp);
8799 /* branch/exchange thumb (bx). */
8801 tmp = load_reg(s, rm);
8803 } else if (op1 == 3) {
8806 rd = (insn >> 12) & 0xf;
8807 tmp = load_reg(s, rm);
8808 tcg_gen_clzi_i32(tmp, tmp, 32);
8809 store_reg(s, rd, tmp);
8817 /* Trivial implementation equivalent to bx. */
8818 tmp = load_reg(s, rm);
8829 /* branch link/exchange thumb (blx) */
8830 tmp = load_reg(s, rm);
8831 tmp2 = tcg_temp_new_i32();
8832 tcg_gen_movi_i32(tmp2, s->pc);
8833 store_reg(s, 14, tmp2);
8839 uint32_t c = extract32(insn, 8, 4);
8841 /* Check this CPU supports ARMv8 CRC instructions.
8842 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8843 * Bits 8, 10 and 11 should be zero.
8845 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
8850 rn = extract32(insn, 16, 4);
8851 rd = extract32(insn, 12, 4);
8853 tmp = load_reg(s, rn);
8854 tmp2 = load_reg(s, rm);
8856 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8857 } else if (op1 == 1) {
8858 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8860 tmp3 = tcg_const_i32(1 << op1);
8862 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8864 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8866 tcg_temp_free_i32(tmp2);
8867 tcg_temp_free_i32(tmp3);
8868 store_reg(s, rd, tmp);
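/* CRC32/CRC32C: op1 gives the operand size as (1 << op1) bytes, which is
 * why tmp2 is masked to a byte or halfword above, and the crc32 vs crc32c
 * helper is chosen from the 'c' field (insn bits [11:8]) extracted at the
 * start of this case.
 */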
8871 case 0x5: /* saturating add/subtract */
8873 rd = (insn >> 12) & 0xf;
8874 rn = (insn >> 16) & 0xf;
8875 tmp = load_reg(s, rm);
8876 tmp2 = load_reg(s, rn);
8878 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8880 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8882 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8883 tcg_temp_free_i32(tmp2);
8884 store_reg(s, rd, tmp);
8888 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8897 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
8900 /* Hypervisor call (v7) */
8908 /* Secure monitor call (v6+) */
8916 g_assert_not_reached();
8920 case 0x8: /* signed multiply */
8925 rs = (insn >> 8) & 0xf;
8926 rn = (insn >> 12) & 0xf;
8927 rd = (insn >> 16) & 0xf;
8929 /* (32 * 16) >> 16 */
8930 tmp = load_reg(s, rm);
8931 tmp2 = load_reg(s, rs);
8933 tcg_gen_sari_i32(tmp2, tmp2, 16);
8936 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8937 tcg_gen_shri_i64(tmp64, tmp64, 16);
8938 tmp = tcg_temp_new_i32();
8939 tcg_gen_extrl_i64_i32(tmp, tmp64);
8940 tcg_temp_free_i64(tmp64);
8941 if ((sh & 2) == 0) {
8942 tmp2 = load_reg(s, rn);
8943 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8944 tcg_temp_free_i32(tmp2);
8946 store_reg(s, rd, tmp);
8949 tmp = load_reg(s, rm);
8950 tmp2 = load_reg(s, rs);
8951 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8952 tcg_temp_free_i32(tmp2);
8954 tmp64 = tcg_temp_new_i64();
8955 tcg_gen_ext_i32_i64(tmp64, tmp);
8956 tcg_temp_free_i32(tmp);
8957 gen_addq(s, tmp64, rn, rd);
8958 gen_storeq_reg(s, rn, rd, tmp64);
8959 tcg_temp_free_i64(tmp64);
8962 tmp2 = load_reg(s, rn);
8963 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8964 tcg_temp_free_i32(tmp2);
8966 store_reg(s, rd, tmp);
8973 } else if (((insn & 0x0e000000) == 0 &&
8974 (insn & 0x00000090) != 0x90) ||
8975 ((insn & 0x0e000000) == (1 << 25))) {
8976 int set_cc, logic_cc, shiftop;
8978 op1 = (insn >> 21) & 0xf;
8979 set_cc = (insn >> 20) & 1;
8980 logic_cc = table_logic_cc[op1] & set_cc;
8982 /* data processing instruction */
8983 if (insn & (1 << 25)) {
8984 /* immediate operand */
8986 shift = ((insn >> 8) & 0xf) * 2;
8988 val = (val >> shift) | (val << (32 - shift));
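/* The A32 "modified immediate" is an 8-bit value rotated right by twice
 * the 4-bit rotate field. As a standalone illustration (not part of the
 * build), the expansion amounts to:
 *
 *     static uint32_t a32_expand_imm(uint32_t insn)
 *     {
 *         uint32_t imm8 = insn & 0xff;
 *         uint32_t rot = ((insn >> 8) & 0xf) * 2;
 *         return rot ? (imm8 >> rot) | (imm8 << (32 - rot)) : imm8;
 *     }
 *
 * e.g. imm8 = 0xff with rotate field 4 (rot = 8) expands to 0xff000000.
 */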
8990 tmp2 = tcg_temp_new_i32();
8991 tcg_gen_movi_i32(tmp2, val);
8992 if (logic_cc && shift) {
8993 gen_set_CF_bit31(tmp2);
8998 tmp2 = load_reg(s, rm);
8999 shiftop = (insn >> 5) & 3;
9000 if (!(insn & (1 << 4))) {
9001 shift = (insn >> 7) & 0x1f;
9002 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9004 rs = (insn >> 8) & 0xf;
9005 tmp = load_reg(s, rs);
9006 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9009 if (op1 != 0x0f && op1 != 0x0d) {
9010 rn = (insn >> 16) & 0xf;
9011 tmp = load_reg(s, rn);
9015 rd = (insn >> 12) & 0xf;
9018 tcg_gen_and_i32(tmp, tmp, tmp2);
9022 store_reg_bx(s, rd, tmp);
9025 tcg_gen_xor_i32(tmp, tmp, tmp2);
9029 store_reg_bx(s, rd, tmp);
9032 if (set_cc && rd == 15) {
9033 /* SUBS r15, ... is used for exception return. */
9037 gen_sub_CC(tmp, tmp, tmp2);
9038 gen_exception_return(s, tmp);
9041 gen_sub_CC(tmp, tmp, tmp2);
9043 tcg_gen_sub_i32(tmp, tmp, tmp2);
9045 store_reg_bx(s, rd, tmp);
9050 gen_sub_CC(tmp, tmp2, tmp);
9052 tcg_gen_sub_i32(tmp, tmp2, tmp);
9054 store_reg_bx(s, rd, tmp);
9058 gen_add_CC(tmp, tmp, tmp2);
9060 tcg_gen_add_i32(tmp, tmp, tmp2);
9062 store_reg_bx(s, rd, tmp);
9066 gen_adc_CC(tmp, tmp, tmp2);
9068 gen_add_carry(tmp, tmp, tmp2);
9070 store_reg_bx(s, rd, tmp);
9074 gen_sbc_CC(tmp, tmp, tmp2);
9076 gen_sub_carry(tmp, tmp, tmp2);
9078 store_reg_bx(s, rd, tmp);
9082 gen_sbc_CC(tmp, tmp2, tmp);
9084 gen_sub_carry(tmp, tmp2, tmp);
9086 store_reg_bx(s, rd, tmp);
9090 tcg_gen_and_i32(tmp, tmp, tmp2);
9093 tcg_temp_free_i32(tmp);
9097 tcg_gen_xor_i32(tmp, tmp, tmp2);
9100 tcg_temp_free_i32(tmp);
9104 gen_sub_CC(tmp, tmp, tmp2);
9106 tcg_temp_free_i32(tmp);
9110 gen_add_CC(tmp, tmp, tmp2);
9112 tcg_temp_free_i32(tmp);
9115 tcg_gen_or_i32(tmp, tmp, tmp2);
9119 store_reg_bx(s, rd, tmp);
9122 if (logic_cc && rd == 15) {
9123 /* MOVS r15, ... is used for exception return. */
9127 gen_exception_return(s, tmp2);
9132 store_reg_bx(s, rd, tmp2);
9136 tcg_gen_andc_i32(tmp, tmp, tmp2);
9140 store_reg_bx(s, rd, tmp);
9144 tcg_gen_not_i32(tmp2, tmp2);
9148 store_reg_bx(s, rd, tmp2);
9151 if (op1 != 0x0f && op1 != 0x0d) {
9152 tcg_temp_free_i32(tmp2);
9155 /* other instructions */
9156 op1 = (insn >> 24) & 0xf;
9160 /* multiplies, extra load/stores */
9161 sh = (insn >> 5) & 3;
9164 rd = (insn >> 16) & 0xf;
9165 rn = (insn >> 12) & 0xf;
9166 rs = (insn >> 8) & 0xf;
9168 op1 = (insn >> 20) & 0xf;
9170 case 0: case 1: case 2: case 3: case 6:
9172 tmp = load_reg(s, rs);
9173 tmp2 = load_reg(s, rm);
9174 tcg_gen_mul_i32(tmp, tmp, tmp2);
9175 tcg_temp_free_i32(tmp2);
9176 if (insn & (1 << 22)) {
9177 /* Subtract (mls) */
9179 tmp2 = load_reg(s, rn);
9180 tcg_gen_sub_i32(tmp, tmp2, tmp);
9181 tcg_temp_free_i32(tmp2);
9182 } else if (insn & (1 << 21)) {
9184 tmp2 = load_reg(s, rn);
9185 tcg_gen_add_i32(tmp, tmp, tmp2);
9186 tcg_temp_free_i32(tmp2);
9188 if (insn & (1 << 20))
9190 store_reg(s, rd, tmp);
9193 /* 64 bit mul double accumulate (UMAAL) */
9195 tmp = load_reg(s, rs);
9196 tmp2 = load_reg(s, rm);
9197 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9198 gen_addq_lo(s, tmp64, rn);
9199 gen_addq_lo(s, tmp64, rd);
9200 gen_storeq_reg(s, rn, rd, tmp64);
9201 tcg_temp_free_i64(tmp64);
9203 case 8: case 9: case 10: case 11:
9204 case 12: case 13: case 14: case 15:
9205 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
9206 tmp = load_reg(s, rs);
9207 tmp2 = load_reg(s, rm);
9208 if (insn & (1 << 22)) {
9209 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
9211 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
9213 if (insn & (1 << 21)) { /* mult accumulate */
9214 TCGv_i32 al = load_reg(s, rn);
9215 TCGv_i32 ah = load_reg(s, rd);
9216 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
9217 tcg_temp_free_i32(al);
9218 tcg_temp_free_i32(ah);
9220 if (insn & (1 << 20)) {
9221 gen_logicq_cc(tmp, tmp2);
9223 store_reg(s, rn, tmp);
9224 store_reg(s, rd, tmp2);
9230 rn = (insn >> 16) & 0xf;
9231 rd = (insn >> 12) & 0xf;
9232 if (insn & (1 << 23)) {
9233 /* load/store exclusive */
9234 int op2 = (insn >> 8) & 3;
9235 op1 = (insn >> 21) & 0x3;
9238 case 0: /* lda/stl */
9244 case 1: /* reserved */
9246 case 2: /* ldaex/stlex */
9249 case 3: /* ldrex/strex */
9258 addr = tcg_temp_local_new_i32();
9259 load_reg_var(s, addr, rn);
9261 /* Since the emulation does not have barriers,
9262 the acquire/release semantics need no special
9265 if (insn & (1 << 20)) {
9266 tmp = tcg_temp_new_i32();
9269 gen_aa32_ld32u_iss(s, tmp, addr,
9274 gen_aa32_ld8u_iss(s, tmp, addr,
9279 gen_aa32_ld16u_iss(s, tmp, addr,
9286 store_reg(s, rd, tmp);
9289 tmp = load_reg(s, rm);
9292 gen_aa32_st32_iss(s, tmp, addr,
9297 gen_aa32_st8_iss(s, tmp, addr,
9302 gen_aa32_st16_iss(s, tmp, addr,
9309 tcg_temp_free_i32(tmp);
9311 } else if (insn & (1 << 20)) {
9314 gen_load_exclusive(s, rd, 15, addr, 2);
9316 case 1: /* ldrexd */
9317 gen_load_exclusive(s, rd, rd + 1, addr, 3);
9319 case 2: /* ldrexb */
9320 gen_load_exclusive(s, rd, 15, addr, 0);
9322 case 3: /* ldrexh */
9323 gen_load_exclusive(s, rd, 15, addr, 1);
9332 gen_store_exclusive(s, rd, rm, 15, addr, 2);
9334 case 1: /* strexd */
9335 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
9337 case 2: /* strexb */
9338 gen_store_exclusive(s, rd, rm, 15, addr, 0);
9340 case 3: /* strexh */
9341 gen_store_exclusive(s, rd, rm, 15, addr, 1);
9347 tcg_temp_free_i32(addr);
9348 } else if ((insn & 0x00300f00) == 0) {
9349 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9354 TCGMemOp opc = s->be_data;
9358 if (insn & (1 << 22)) {
9361 opc |= MO_UL | MO_ALIGN;
9364 addr = load_reg(s, rn);
9365 taddr = gen_aa32_addr(s, addr, opc);
9366 tcg_temp_free_i32(addr);
9368 tmp = load_reg(s, rm);
9369 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9370 get_mem_index(s), opc);
9371 tcg_temp_free(taddr);
9372 store_reg(s, rd, tmp);
9379 bool load = insn & (1 << 20);
9380 bool wbit = insn & (1 << 21);
9381 bool pbit = insn & (1 << 24);
9382 bool doubleword = false;
9385 /* Misc load/store */
9386 rn = (insn >> 16) & 0xf;
9387 rd = (insn >> 12) & 0xf;
9389 /* ISS not valid if writeback */
9390 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9392 if (!load && (sh & 2)) {
9396 /* UNPREDICTABLE; we choose to UNDEF */
9399 load = (sh & 1) == 0;
9403 addr = load_reg(s, rn);
9405 gen_add_datah_offset(s, insn, 0, addr);
9412 tmp = load_reg(s, rd);
9413 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9414 tcg_temp_free_i32(tmp);
9415 tcg_gen_addi_i32(addr, addr, 4);
9416 tmp = load_reg(s, rd + 1);
9417 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9418 tcg_temp_free_i32(tmp);
9421 tmp = tcg_temp_new_i32();
9422 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9423 store_reg(s, rd, tmp);
9424 tcg_gen_addi_i32(addr, addr, 4);
9425 tmp = tcg_temp_new_i32();
9426 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9429 address_offset = -4;
9432 tmp = tcg_temp_new_i32();
9435 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9439 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9444 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9450 tmp = load_reg(s, rd);
9451 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
9452 tcg_temp_free_i32(tmp);
9454 /* Perform base writeback before the loaded value to
9455 ensure correct behavior with overlapping index registers.
9456 ldrd with base writeback is undefined if the
9457 destination and index registers overlap. */
9459 gen_add_datah_offset(s, insn, address_offset, addr);
9460 store_reg(s, rn, addr);
9463 tcg_gen_addi_i32(addr, addr, address_offset);
9464 store_reg(s, rn, addr);
9466 tcg_temp_free_i32(addr);
9469 /* Complete the load. */
9470 store_reg(s, rd, tmp);
9479 if (insn & (1 << 4)) {
9481 /* Armv6 Media instructions. */
9483 rn = (insn >> 16) & 0xf;
9484 rd = (insn >> 12) & 0xf;
9485 rs = (insn >> 8) & 0xf;
9486 switch ((insn >> 23) & 3) {
9487 case 0: /* Parallel add/subtract. */
9488 op1 = (insn >> 20) & 7;
9489 tmp = load_reg(s, rn);
9490 tmp2 = load_reg(s, rm);
9491 sh = (insn >> 5) & 7;
9492 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9494 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
9495 tcg_temp_free_i32(tmp2);
9496 store_reg(s, rd, tmp);
9499 if ((insn & 0x00700020) == 0) {
9500 /* Halfword pack. */
9501 tmp = load_reg(s, rn);
9502 tmp2 = load_reg(s, rm);
9503 shift = (insn >> 7) & 0x1f;
9504 if (insn & (1 << 6)) {
9508 tcg_gen_sari_i32(tmp2, tmp2, shift);
9509 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9510 tcg_gen_ext16u_i32(tmp2, tmp2);
9514 tcg_gen_shli_i32(tmp2, tmp2, shift);
9515 tcg_gen_ext16u_i32(tmp, tmp);
9516 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9518 tcg_gen_or_i32(tmp, tmp, tmp2);
9519 tcg_temp_free_i32(tmp2);
9520 store_reg(s, rd, tmp);
9521 } else if ((insn & 0x00200020) == 0x00200000) {
9523 tmp = load_reg(s, rm);
9524 shift = (insn >> 7) & 0x1f;
9525 if (insn & (1 << 6)) {
9528 tcg_gen_sari_i32(tmp, tmp, shift);
9530 tcg_gen_shli_i32(tmp, tmp, shift);
9532 sh = (insn >> 16) & 0x1f;
9533 tmp2 = tcg_const_i32(sh);
9534 if (insn & (1 << 22))
9535 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
9537 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
9538 tcg_temp_free_i32(tmp2);
9539 store_reg(s, rd, tmp);
9540 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9542 tmp = load_reg(s, rm);
9543 sh = (insn >> 16) & 0x1f;
9544 tmp2 = tcg_const_i32(sh);
9545 if (insn & (1 << 22))
9546 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9548 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9549 tcg_temp_free_i32(tmp2);
9550 store_reg(s, rd, tmp);
9551 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9553 tmp = load_reg(s, rn);
9554 tmp2 = load_reg(s, rm);
9555 tmp3 = tcg_temp_new_i32();
9556 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9557 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9558 tcg_temp_free_i32(tmp3);
9559 tcg_temp_free_i32(tmp2);
9560 store_reg(s, rd, tmp);
9561 } else if ((insn & 0x000003e0) == 0x00000060) {
9562 tmp = load_reg(s, rm);
9563 shift = (insn >> 10) & 3;
9564 /* ??? In many cases it's not necessary to do a
9565 rotate, a shift is sufficient. */
9567 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9568 op1 = (insn >> 20) & 7;
9570 case 0: gen_sxtb16(tmp); break;
9571 case 2: gen_sxtb(tmp); break;
9572 case 3: gen_sxth(tmp); break;
9573 case 4: gen_uxtb16(tmp); break;
9574 case 6: gen_uxtb(tmp); break;
9575 case 7: gen_uxth(tmp); break;
9576 default: goto illegal_op;
9579 tmp2 = load_reg(s, rn);
9580 if ((op1 & 3) == 0) {
9581 gen_add16(tmp, tmp2);
9583 tcg_gen_add_i32(tmp, tmp, tmp2);
9584 tcg_temp_free_i32(tmp2);
9587 store_reg(s, rd, tmp);
9588 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9590 tmp = load_reg(s, rm);
9591 if (insn & (1 << 22)) {
9592 if (insn & (1 << 7)) {
9596 gen_helper_rbit(tmp, tmp);
9599 if (insn & (1 << 7))
9602 tcg_gen_bswap32_i32(tmp, tmp);
9604 store_reg(s, rd, tmp);
9609 case 2: /* Multiplies (Type 3). */
9610 switch ((insn >> 20) & 0x7) {
9612 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9613 /* op2 not 00x or 11x : UNDEF */
9616 /* Signed multiply most significant [accumulate].
9617 (SMMUL, SMMLA, SMMLS) */
9618 tmp = load_reg(s, rm);
9619 tmp2 = load_reg(s, rs);
9620 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9623 tmp = load_reg(s, rd);
9624 if (insn & (1 << 6)) {
9625 tmp64 = gen_subq_msw(tmp64, tmp);
9627 tmp64 = gen_addq_msw(tmp64, tmp);
9630 if (insn & (1 << 5)) {
9631 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9633 tcg_gen_shri_i64(tmp64, tmp64, 32);
9634 tmp = tcg_temp_new_i32();
9635 tcg_gen_extrl_i64_i32(tmp, tmp64);
9636 tcg_temp_free_i64(tmp64);
9637 store_reg(s, rn, tmp);
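/* SMMUL/SMMLA/SMMLS keep only the most significant 32 bits of the 64-bit
 * product; when the 'R' bit (insn bit 5, tested above) is set, 0x80000000
 * is added first so the high half is rounded rather than truncated.
 */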
9641 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9642 if (insn & (1 << 7)) {
9645 tmp = load_reg(s, rm);
9646 tmp2 = load_reg(s, rs);
9647 if (insn & (1 << 5))
9648 gen_swap_half(tmp2);
9649 gen_smul_dual(tmp, tmp2);
9650 if (insn & (1 << 22)) {
9651 /* smlald, smlsld */
9654 tmp64 = tcg_temp_new_i64();
9655 tmp64_2 = tcg_temp_new_i64();
9656 tcg_gen_ext_i32_i64(tmp64, tmp);
9657 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
9658 tcg_temp_free_i32(tmp);
9659 tcg_temp_free_i32(tmp2);
9660 if (insn & (1 << 6)) {
9661 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9663 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9665 tcg_temp_free_i64(tmp64_2);
9666 gen_addq(s, tmp64, rd, rn);
9667 gen_storeq_reg(s, rd, rn, tmp64);
9668 tcg_temp_free_i64(tmp64);
9670 /* smuad, smusd, smlad, smlsd */
9671 if (insn & (1 << 6)) {
9672 /* This subtraction cannot overflow. */
9673 tcg_gen_sub_i32(tmp, tmp, tmp2);
9675 /* This addition cannot overflow 32 bits;
9676 * however it may overflow considered as a
9677 * signed operation, in which case we must set the Q flag. */
9680 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9682 tcg_temp_free_i32(tmp2);
9685 tmp2 = load_reg(s, rd);
9686 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9687 tcg_temp_free_i32(tmp2);
9689 store_reg(s, rn, tmp);
9695 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
9698 if (((insn >> 5) & 7) || (rd != 15)) {
9701 tmp = load_reg(s, rm);
9702 tmp2 = load_reg(s, rs);
9703 if (insn & (1 << 21)) {
9704 gen_helper_udiv(tmp, tmp, tmp2);
9706 gen_helper_sdiv(tmp, tmp, tmp2);
9708 tcg_temp_free_i32(tmp2);
9709 store_reg(s, rn, tmp);
9716 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9718 case 0: /* Unsigned sum of absolute differences. */
9720 tmp = load_reg(s, rm);
9721 tmp2 = load_reg(s, rs);
9722 gen_helper_usad8(tmp, tmp, tmp2);
9723 tcg_temp_free_i32(tmp2);
9725 tmp2 = load_reg(s, rd);
9726 tcg_gen_add_i32(tmp, tmp, tmp2);
9727 tcg_temp_free_i32(tmp2);
9729 store_reg(s, rn, tmp);
9731 case 0x20: case 0x24: case 0x28: case 0x2c:
9732 /* Bitfield insert/clear. */
9734 shift = (insn >> 7) & 0x1f;
9735 i = (insn >> 16) & 0x1f;
9737 /* UNPREDICTABLE; we choose to UNDEF */
9742 tmp = tcg_temp_new_i32();
9743 tcg_gen_movi_i32(tmp, 0);
9745 tmp = load_reg(s, rm);
9748 tmp2 = load_reg(s, rd);
9749 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9750 tcg_temp_free_i32(tmp2);
9752 store_reg(s, rd, tmp);
9754 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9755 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9757 tmp = load_reg(s, rm);
9758 shift = (insn >> 7) & 0x1f;
9759 i = ((insn >> 16) & 0x1f) + 1;
9764 tcg_gen_extract_i32(tmp, tmp, shift, i);
9766 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9769 store_reg(s, rd, tmp);
9779 /* Check for undefined extension instructions
9780 * per the ARM Bible, i.e.:
9781 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9783 sh = (0xf << 20) | (0xf << 4);
9784 if (op1 == 0x7 && ((insn & sh) == sh))
9788 /* load/store byte/word */
9789 rn = (insn >> 16) & 0xf;
9790 rd = (insn >> 12) & 0xf;
9791 tmp2 = load_reg(s, rn);
9792 if ((insn & 0x01200000) == 0x00200000) {
9794 i = get_a32_user_mem_index(s);
9796 i = get_mem_index(s);
9798 if (insn & (1 << 24))
9799 gen_add_data_offset(s, insn, tmp2);
9800 if (insn & (1 << 20)) {
9802 tmp = tcg_temp_new_i32();
9803 if (insn & (1 << 22)) {
9804 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9806 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9810 tmp = load_reg(s, rd);
9811 if (insn & (1 << 22)) {
9812 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
9814 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
9816 tcg_temp_free_i32(tmp);
9818 if (!(insn & (1 << 24))) {
9819 gen_add_data_offset(s, insn, tmp2);
9820 store_reg(s, rn, tmp2);
9821 } else if (insn & (1 << 21)) {
9822 store_reg(s, rn, tmp2);
9824 tcg_temp_free_i32(tmp2);
9826 if (insn & (1 << 20)) {
9827 /* Complete the load. */
9828 store_reg_from_load(s, rd, tmp);
9834 int j, n, loaded_base;
9835 bool exc_return = false;
9836 bool is_load = extract32(insn, 20, 1);
9838 TCGv_i32 loaded_var;
9839 /* load/store multiple words */
9840 /* XXX: store correct base if write back */
9841 if (insn & (1 << 22)) {
9842 /* LDM (user), LDM (exception return) and STM (user) */
9844 goto illegal_op; /* only usable in supervisor mode */
9846 if (is_load && extract32(insn, 15, 1)) {
9852 rn = (insn >> 16) & 0xf;
9853 addr = load_reg(s, rn);
9855 /* compute total size */
9860 if (insn & (1 << i))
9863 /* XXX: test invalid n == 0 case ? */
9864 if (insn & (1 << 23)) {
9865 if (insn & (1 << 24)) {
9867 tcg_gen_addi_i32(addr, addr, 4);
9869 /* post increment */
9872 if (insn & (1 << 24)) {
9874 tcg_gen_addi_i32(addr, addr, -(n * 4));
9876 /* post decrement */
9878 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9883 if (insn & (1 << i)) {
9886 tmp = tcg_temp_new_i32();
9887 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9889 tmp2 = tcg_const_i32(i);
9890 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9891 tcg_temp_free_i32(tmp2);
9892 tcg_temp_free_i32(tmp);
9893 } else if (i == rn) {
9896 } else if (rn == 15 && exc_return) {
9897 store_pc_exc_ret(s, tmp);
9899 store_reg_from_load(s, i, tmp);
9904 /* special case: r15 = PC + 8 */
9905 val = (long)s->pc + 4;
9906 tmp = tcg_temp_new_i32();
9907 tcg_gen_movi_i32(tmp, val);
9909 tmp = tcg_temp_new_i32();
9910 tmp2 = tcg_const_i32(i);
9911 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9912 tcg_temp_free_i32(tmp2);
9914 tmp = load_reg(s, i);
9916 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9917 tcg_temp_free_i32(tmp);
9920 /* no need to add after the last transfer */
9922 tcg_gen_addi_i32(addr, addr, 4);
9925 if (insn & (1 << 21)) {
9927 if (insn & (1 << 23)) {
9928 if (insn & (1 << 24)) {
9931 /* post increment */
9932 tcg_gen_addi_i32(addr, addr, 4);
9935 if (insn & (1 << 24)) {
9938 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9940 /* post decrement */
9941 tcg_gen_addi_i32(addr, addr, -(n * 4));
9944 store_reg(s, rn, addr);
9946 tcg_temp_free_i32(addr);
9949 store_reg(s, rn, loaded_var);
9952 /* Restore CPSR from SPSR. */
9953 tmp = load_cpu_field(spsr);
9954 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9957 gen_helper_cpsr_write_eret(cpu_env, tmp);
9958 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9961 tcg_temp_free_i32(tmp);
9962 /* Must exit loop to check un-masked IRQs */
9963 s->base.is_jmp = DISAS_EXIT;
9972 /* branch (and link) */
9973 val = (int32_t)s->pc;
9974 if (insn & (1 << 24)) {
9975 tmp = tcg_temp_new_i32();
9976 tcg_gen_movi_i32(tmp, val);
9977 store_reg(s, 14, tmp);
9979 offset = sextract32(insn << 2, 0, 26);
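/* The 24-bit branch immediate is shifted left by 2 and sign-extended,
 * giving a word-aligned byte offset of roughly -32MB .. +32MB relative to
 * the already-advanced PC value loaded into val above.
 */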
9987 if (((insn >> 8) & 0xe) == 10) {
9989 if (disas_vfp_insn(s, insn)) {
9992 } else if (disas_coproc_insn(s, insn)) {
9999 gen_set_pc_im(s, s->pc);
10000 s->svc_imm = extract32(insn, 0, 24);
10001 s->base.is_jmp = DISAS_SWI;
10005 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10006 default_exception_el(s));
10012 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10014 /* Return true if this is a 16 bit instruction. We must be precise
10015 * about this (matching the decode). We assume that s->pc still
10016 * points to the first 16 bits of the insn.
10018 if ((insn >> 11) < 0x1d) {
10019 /* Definitely a 16-bit instruction */
10023 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10024 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10025 * end up actually treating this as two 16-bit insns, though,
10026 * if it's half of a bl/blx pair that might span a page boundary.
10028 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10029 arm_dc_feature(s, ARM_FEATURE_M)) {
10030 /* Thumb2 cores (including all M profile ones) always treat
10031 * 32-bit insns as 32-bit.
10036 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
10037 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10038 * is not on the next page; we merge this into a 32-bit
10043 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10044 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10045 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10046 * -- handle as single 16 bit insn
10051 /* Return true if this is a Thumb-2 logical op. */
10053 thumb2_logic_op(int op)
10058 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10059 then set condition code flags based on the result of the operation.
10060 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10061 to the high bit of T1.
10062 Returns zero if the opcode is valid. */
10065 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10066 TCGv_i32 t0, TCGv_i32 t1)
10073 tcg_gen_and_i32(t0, t0, t1);
10077 tcg_gen_andc_i32(t0, t0, t1);
10081 tcg_gen_or_i32(t0, t0, t1);
10085 tcg_gen_orc_i32(t0, t0, t1);
10089 tcg_gen_xor_i32(t0, t0, t1);
10094 gen_add_CC(t0, t0, t1);
10096 tcg_gen_add_i32(t0, t0, t1);
10100 gen_adc_CC(t0, t0, t1);
10106 gen_sbc_CC(t0, t0, t1);
10108 gen_sub_carry(t0, t0, t1);
10113 gen_sub_CC(t0, t0, t1);
10115 tcg_gen_sub_i32(t0, t0, t1);
10119 gen_sub_CC(t0, t1, t0);
10121 tcg_gen_sub_i32(t0, t1, t0);
10123 default: /* 5, 6, 7, 9, 12, 15. */
10129 gen_set_CF_bit31(t1);
10134 /* Translate a 32-bit thumb instruction. */
10135 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
10137 uint32_t imm, shift, offset;
10138 uint32_t rd, rn, rm, rs;
10150 * ARMv6-M supports a limited subset of Thumb2 instructions.
10151 * Other Thumb1 architectures allow only 32-bit
10152 * combined BL/BLX prefix and suffix.
10154 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10155 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10157 bool found = false;
10158 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10159 0xf3b08040 /* dsb */,
10160 0xf3b08050 /* dmb */,
10161 0xf3b08060 /* isb */,
10162 0xf3e08000 /* mrs */,
10163 0xf000d000 /* bl */};
10164 static const uint32_t armv6m_mask[] = {0xffe0d000,
10171 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10172 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10180 } else if ((insn & 0xf800e800) != 0xf000e800) {
10184 rn = (insn >> 16) & 0xf;
10185 rs = (insn >> 12) & 0xf;
10186 rd = (insn >> 8) & 0xf;
10188 switch ((insn >> 25) & 0xf) {
10189 case 0: case 1: case 2: case 3:
10190 /* 16-bit instructions. Should never happen. */
10193 if (insn & (1 << 22)) {
10194 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10195 * - load/store doubleword, load/store exclusive, ldacq/strel,
10196 * table branch, TT.
10198 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10199 arm_dc_feature(s, ARM_FEATURE_V8)) {
10200 /* 0b1110_1001_0111_1111_1110_1001_0111_1111 - SG (v8M Secure Gateway)
10202 * The bulk of the behaviour for this instruction is implemented
10203 * in v7m_handle_execute_nsc(), which deals with the insn when
10204 * it is executed by a CPU in non-secure state from memory
10205 * which is Secure & NonSecure-Callable.
10206 * Here we only need to handle the remaining cases:
10207 * * in NS memory (including the "security extension not
10208 * implemented" case) : NOP
10209 * * in S memory but CPU already secure (clear IT bits)
10210 * We know that the attribute for the memory this insn is
10211 * in must match the current CPU state, because otherwise
10212 * get_phys_addr_pmsav8 would have generated an exception.
10214 if (s->v8m_secure) {
10215 /* Like the IT insn, we don't need to generate any code */
10216 s->condexec_cond = 0;
10217 s->condexec_mask = 0;
10219 } else if (insn & 0x01200000) {
10220 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10221 * - load/store dual (post-indexed)
10222 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10223 * - load/store dual (literal and immediate)
10224 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10225 * - load/store dual (pre-indexed)
10228 if (insn & (1 << 21)) {
10229 /* UNPREDICTABLE */
10232 addr = tcg_temp_new_i32();
10233 tcg_gen_movi_i32(addr, s->pc & ~3);
10235 addr = load_reg(s, rn);
10237 offset = (insn & 0xff) * 4;
10238 if ((insn & (1 << 23)) == 0)
10240 if (insn & (1 << 24)) {
10241 tcg_gen_addi_i32(addr, addr, offset);
10244 if (insn & (1 << 20)) {
10246 tmp = tcg_temp_new_i32();
10247 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10248 store_reg(s, rs, tmp);
10249 tcg_gen_addi_i32(addr, addr, 4);
10250 tmp = tcg_temp_new_i32();
10251 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10252 store_reg(s, rd, tmp);
10255 tmp = load_reg(s, rs);
10256 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10257 tcg_temp_free_i32(tmp);
10258 tcg_gen_addi_i32(addr, addr, 4);
10259 tmp = load_reg(s, rd);
10260 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10261 tcg_temp_free_i32(tmp);
10263 if (insn & (1 << 21)) {
10264 /* Base writeback. */
10265 tcg_gen_addi_i32(addr, addr, offset - 4);
10266 store_reg(s, rn, addr);
10268 tcg_temp_free_i32(addr);
10270 } else if ((insn & (1 << 23)) == 0) {
10271 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10272 * - load/store exclusive word
10276 if (!(insn & (1 << 20)) &&
10277 arm_dc_feature(s, ARM_FEATURE_M) &&
10278 arm_dc_feature(s, ARM_FEATURE_V8)) {
10279 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10282 bool alt = insn & (1 << 7);
10283 TCGv_i32 addr, op, ttresp;
10285 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10286 /* we UNDEF for these UNPREDICTABLE cases */
10290 if (alt && !s->v8m_secure) {
10294 addr = load_reg(s, rn);
10295 op = tcg_const_i32(extract32(insn, 6, 2));
10296 ttresp = tcg_temp_new_i32();
10297 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10298 tcg_temp_free_i32(addr);
10299 tcg_temp_free_i32(op);
10300 store_reg(s, rd, ttresp);
10305 addr = tcg_temp_local_new_i32();
10306 load_reg_var(s, addr, rn);
10307 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
10308 if (insn & (1 << 20)) {
10309 gen_load_exclusive(s, rs, 15, addr, 2);
10311 gen_store_exclusive(s, rd, rs, 15, addr, 2);
10313 tcg_temp_free_i32(addr);
10314 } else if ((insn & (7 << 5)) == 0) {
10315 /* Table Branch. */
10317 addr = tcg_temp_new_i32();
10318 tcg_gen_movi_i32(addr, s->pc);
10320 addr = load_reg(s, rn);
10322 tmp = load_reg(s, rm);
10323 tcg_gen_add_i32(addr, addr, tmp);
10324 if (insn & (1 << 4)) {
10326 tcg_gen_add_i32(addr, addr, tmp);
10327 tcg_temp_free_i32(tmp);
10328 tmp = tcg_temp_new_i32();
10329 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
10331 tcg_temp_free_i32(tmp);
10332 tmp = tcg_temp_new_i32();
10333 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
10335 tcg_temp_free_i32(addr);
10336 tcg_gen_shli_i32(tmp, tmp, 1);
10337 tcg_gen_addi_i32(tmp, tmp, s->pc);
10338 store_reg(s, 15, tmp);
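/* Table Branch (TBB/TBH): bit 4 selects the halfword table. The loaded
 * entry is doubled (the shli above) and added to the PC to form the new
 * branch target, which is then written to r15.
 */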
10340 int op2 = (insn >> 6) & 0x3;
10341 op = (insn >> 4) & 0x3;
10346 /* Load/store exclusive byte/halfword/doubleword */
10353 /* Load-acquire/store-release */
10359 /* Load-acquire/store-release exclusive */
10363 addr = tcg_temp_local_new_i32();
10364 load_reg_var(s, addr, rn);
10366 if (insn & (1 << 20)) {
10367 tmp = tcg_temp_new_i32();
10370 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10374 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10378 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10384 store_reg(s, rs, tmp);
10386 tmp = load_reg(s, rs);
10389 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10393 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10397 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10403 tcg_temp_free_i32(tmp);
10405 } else if (insn & (1 << 20)) {
10406 gen_load_exclusive(s, rs, rd, addr, op);
10408 gen_store_exclusive(s, rm, rs, rd, addr, op);
10410 tcg_temp_free_i32(addr);
10413 /* Load/store multiple, RFE, SRS. */
10414 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
10415 /* RFE, SRS: not available in user mode or on M profile */
10416 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10419 if (insn & (1 << 20)) {
10421 addr = load_reg(s, rn);
10422 if ((insn & (1 << 24)) == 0)
10423 tcg_gen_addi_i32(addr, addr, -8);
10424 /* Load PC into tmp and CPSR into tmp2. */
10425 tmp = tcg_temp_new_i32();
10426 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10427 tcg_gen_addi_i32(addr, addr, 4);
10428 tmp2 = tcg_temp_new_i32();
10429 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
10430 if (insn & (1 << 21)) {
10431 /* Base writeback. */
10432 if (insn & (1 << 24)) {
10433 tcg_gen_addi_i32(addr, addr, 4);
10435 tcg_gen_addi_i32(addr, addr, -4);
10437 store_reg(s, rn, addr);
10439 tcg_temp_free_i32(addr);
10441 gen_rfe(s, tmp, tmp2);
10444 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10448 int i, loaded_base = 0;
10449 TCGv_i32 loaded_var;
10450 /* Load/store multiple. */
10451 addr = load_reg(s, rn);
10453 for (i = 0; i < 16; i++) {
10454 if (insn & (1 << i))
10457 if (insn & (1 << 24)) {
10458 tcg_gen_addi_i32(addr, addr, -offset);
10462 for (i = 0; i < 16; i++) {
10463 if ((insn & (1 << i)) == 0)
10465 if (insn & (1 << 20)) {
10467 tmp = tcg_temp_new_i32();
10468 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10470 gen_bx_excret(s, tmp);
10471 } else if (i == rn) {
10475 store_reg(s, i, tmp);
10479 tmp = load_reg(s, i);
10480 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10481 tcg_temp_free_i32(tmp);
10483 tcg_gen_addi_i32(addr, addr, 4);
10486 store_reg(s, rn, loaded_var);
10488 if (insn & (1 << 21)) {
10489 /* Base register writeback. */
10490 if (insn & (1 << 24)) {
10491 tcg_gen_addi_i32(addr, addr, -offset);
10493 /* Fault if writeback register is in register list. */
10494 if (insn & (1 << rn))
10496 store_reg(s, rn, addr);
10498 tcg_temp_free_i32(addr);
10505 op = (insn >> 21) & 0xf;
10507 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10510 /* Halfword pack. */
10511 tmp = load_reg(s, rn);
10512 tmp2 = load_reg(s, rm);
10513 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10514 if (insn & (1 << 5)) {
10518 tcg_gen_sari_i32(tmp2, tmp2, shift);
10519 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10520 tcg_gen_ext16u_i32(tmp2, tmp2);
10524 tcg_gen_shli_i32(tmp2, tmp2, shift);
10525 tcg_gen_ext16u_i32(tmp, tmp);
10526 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10528 tcg_gen_or_i32(tmp, tmp, tmp2);
10529 tcg_temp_free_i32(tmp2);
10530 store_reg(s, rd, tmp);
10532 /* Data processing register constant shift. */
10534 tmp = tcg_temp_new_i32();
10535 tcg_gen_movi_i32(tmp, 0);
10537 tmp = load_reg(s, rn);
10539 tmp2 = load_reg(s, rm);
10541 shiftop = (insn >> 4) & 3;
10542 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10543 conds = (insn & (1 << 20)) != 0;
10544 logic_cc = (conds && thumb2_logic_op(op));
10545 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10546 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10548 tcg_temp_free_i32(tmp2);
10550 store_reg(s, rd, tmp);
10552 tcg_temp_free_i32(tmp);
10556 case 13: /* Misc data processing. */
10557 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10558 if (op < 4 && (insn & 0xf000) != 0xf000)
10561 case 0: /* Register controlled shift. */
10562 tmp = load_reg(s, rn);
10563 tmp2 = load_reg(s, rm);
10564 if ((insn & 0x70) != 0)
10566 op = (insn >> 21) & 3;
10567 logic_cc = (insn & (1 << 20)) != 0;
10568 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10571 store_reg(s, rd, tmp);
10573 case 1: /* Sign/zero extend. */
10574 op = (insn >> 20) & 7;
10576 case 0: /* SXTAH, SXTH */
10577 case 1: /* UXTAH, UXTH */
10578 case 4: /* SXTAB, SXTB */
10579 case 5: /* UXTAB, UXTB */
10581 case 2: /* SXTAB16, SXTB16 */
10582 case 3: /* UXTAB16, UXTB16 */
10583 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10591 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10595 tmp = load_reg(s, rm);
10596 shift = (insn >> 4) & 3;
10597 /* ??? In many cases it's not necessary to do a
10598 rotate; a shift is sufficient. */
10600 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
10601 op = (insn >> 20) & 7;
10603 case 0: gen_sxth(tmp); break;
10604 case 1: gen_uxth(tmp); break;
10605 case 2: gen_sxtb16(tmp); break;
10606 case 3: gen_uxtb16(tmp); break;
10607 case 4: gen_sxtb(tmp); break;
10608 case 5: gen_uxtb(tmp); break;
10610 g_assert_not_reached();
10613 tmp2 = load_reg(s, rn);
10614 if ((op >> 1) == 1) {
10615 gen_add16(tmp, tmp2);
10617 tcg_gen_add_i32(tmp, tmp, tmp2);
10618 tcg_temp_free_i32(tmp2);
10621 store_reg(s, rd, tmp);
10623 case 2: /* SIMD add/subtract. */
10624 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10627 op = (insn >> 20) & 7;
10628 shift = (insn >> 4) & 7;
10629 if ((op & 3) == 3 || (shift & 3) == 3)
10631 tmp = load_reg(s, rn);
10632 tmp2 = load_reg(s, rm);
10633 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
10634 tcg_temp_free_i32(tmp2);
10635 store_reg(s, rd, tmp);
10637 case 3: /* Other data processing. */
10638 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10640 /* Saturating add/subtract. */
10641 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10644 tmp = load_reg(s, rn);
10645 tmp2 = load_reg(s, rm);
10647 gen_helper_double_saturate(tmp, cpu_env, tmp);
10649 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
10651 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
10652 tcg_temp_free_i32(tmp2);
10655 case 0x0a: /* rbit */
10656 case 0x08: /* rev */
10657 case 0x09: /* rev16 */
10658 case 0x0b: /* revsh */
10659 case 0x18: /* clz */
10661 case 0x10: /* sel */
10662 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10666 case 0x20: /* crc32/crc32c */
10672 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10679 tmp = load_reg(s, rn);
10681 case 0x0a: /* rbit */
10682 gen_helper_rbit(tmp, tmp);
10684 case 0x08: /* rev */
10685 tcg_gen_bswap32_i32(tmp, tmp);
10687 case 0x09: /* rev16 */
10690 case 0x0b: /* revsh */
10693 case 0x10: /* sel */
10694 tmp2 = load_reg(s, rm);
10695 tmp3 = tcg_temp_new_i32();
10696 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10697 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10698 tcg_temp_free_i32(tmp3);
10699 tcg_temp_free_i32(tmp2);
10701 case 0x18: /* clz */
10702 tcg_gen_clzi_i32(tmp, tmp, 32);
10712 uint32_t sz = op & 0x3;
10713 uint32_t c = op & 0x8;
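/* sz selects the operand size (0 = byte, 1 = halfword, 2 = word);
 * bit 3 of op selects CRC32C rather than CRC32.
 */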
10715 tmp2 = load_reg(s, rm);
10717 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10718 } else if (sz == 1) {
10719 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10721 tmp3 = tcg_const_i32(1 << sz);
10723 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10725 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10727 tcg_temp_free_i32(tmp2);
10728 tcg_temp_free_i32(tmp3);
10732 g_assert_not_reached();
10735 store_reg(s, rd, tmp);
10737 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10738 switch ((insn >> 20) & 7) {
10739 case 0: /* 32 x 32 -> 32 */
10740 case 7: /* Unsigned sum of absolute differences. */
10742 case 1: /* 16 x 16 -> 32 */
10743 case 2: /* Dual multiply add. */
10744 case 3: /* 32 * 16 -> 32msb */
10745 case 4: /* Dual multiply subtract. */
10746 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10747 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10752 op = (insn >> 4) & 0xf;
10753 tmp = load_reg(s, rn);
10754 tmp2 = load_reg(s, rm);
10755 switch ((insn >> 20) & 7) {
10756 case 0: /* 32 x 32 -> 32 */
10757 tcg_gen_mul_i32(tmp, tmp, tmp2);
10758 tcg_temp_free_i32(tmp2);
10760 tmp2 = load_reg(s, rs);
10762 tcg_gen_sub_i32(tmp, tmp2, tmp);
10764 tcg_gen_add_i32(tmp, tmp, tmp2);
10765 tcg_temp_free_i32(tmp2);
10768 case 1: /* 16 x 16 -> 32 */
10769 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10770 tcg_temp_free_i32(tmp2);
10772 tmp2 = load_reg(s, rs);
10773 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10774 tcg_temp_free_i32(tmp2);
10777 case 2: /* Dual multiply add. */
10778 case 4: /* Dual multiply subtract. */
10780 gen_swap_half(tmp2);
10781 gen_smul_dual(tmp, tmp2);
10782 if (insn & (1 << 22)) {
10783 /* This subtraction cannot overflow. */
10784 tcg_gen_sub_i32(tmp, tmp, tmp2);
10786 /* This addition cannot overflow 32 bits;
10787 * however it may overflow when considered as a signed
10788 * operation, in which case we must set the Q flag.
10790 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10792 tcg_temp_free_i32(tmp2);
10795 tmp2 = load_reg(s, rs);
10796 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10797 tcg_temp_free_i32(tmp2);
10800 case 3: /* 32 * 16 -> 32msb */
10802 tcg_gen_sari_i32(tmp2, tmp2, 16);
10805 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10806 tcg_gen_shri_i64(tmp64, tmp64, 16);
10807 tmp = tcg_temp_new_i32();
10808 tcg_gen_extrl_i64_i32(tmp, tmp64);
10809 tcg_temp_free_i64(tmp64);
10812 tmp2 = load_reg(s, rs);
10813 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10814 tcg_temp_free_i32(tmp2);
10817 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10818 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10820 tmp = load_reg(s, rs);
10821 if (insn & (1 << 20)) {
10822 tmp64 = gen_addq_msw(tmp64, tmp);
10824 tmp64 = gen_subq_msw(tmp64, tmp);
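/* Bit 4 is the rounding (R) variant: bias by 0x80000000 before
 * taking the high 32 bits.
 */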
10827 if (insn & (1 << 4)) {
10828 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10830 tcg_gen_shri_i64(tmp64, tmp64, 32);
10831 tmp = tcg_temp_new_i32();
10832 tcg_gen_extrl_i64_i32(tmp, tmp64);
10833 tcg_temp_free_i64(tmp64);
10835 case 7: /* Unsigned sum of absolute differences. */
10836 gen_helper_usad8(tmp, tmp, tmp2);
10837 tcg_temp_free_i32(tmp2);
10839 tmp2 = load_reg(s, rs);
10840 tcg_gen_add_i32(tmp, tmp, tmp2);
10841 tcg_temp_free_i32(tmp2);
10845 store_reg(s, rd, tmp);
10847 case 6: case 7: /* 64-bit multiply, Divide. */
10848 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10849 tmp = load_reg(s, rn);
10850 tmp2 = load_reg(s, rm);
10851 if ((op & 0x50) == 0x10) {
10853 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
10857 gen_helper_udiv(tmp, tmp, tmp2);
10859 gen_helper_sdiv(tmp, tmp, tmp2);
10860 tcg_temp_free_i32(tmp2);
10861 store_reg(s, rd, tmp);
10862 } else if ((op & 0xe) == 0xc) {
10863 /* Dual multiply accumulate long. */
10864 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10865 tcg_temp_free_i32(tmp);
10866 tcg_temp_free_i32(tmp2);
10870 gen_swap_half(tmp2);
10871 gen_smul_dual(tmp, tmp2);
10873 tcg_gen_sub_i32(tmp, tmp, tmp2);
10875 tcg_gen_add_i32(tmp, tmp, tmp2);
10877 tcg_temp_free_i32(tmp2);
10879 tmp64 = tcg_temp_new_i64();
10880 tcg_gen_ext_i32_i64(tmp64, tmp);
10881 tcg_temp_free_i32(tmp);
10882 gen_addq(s, tmp64, rs, rd);
10883 gen_storeq_reg(s, rs, rd, tmp64);
10884 tcg_temp_free_i64(tmp64);
10887 /* Unsigned 64-bit multiply */
10888 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10892 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10893 tcg_temp_free_i32(tmp2);
10894 tcg_temp_free_i32(tmp);
10897 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10898 tcg_temp_free_i32(tmp2);
10899 tmp64 = tcg_temp_new_i64();
10900 tcg_gen_ext_i32_i64(tmp64, tmp);
10901 tcg_temp_free_i32(tmp);
10903 /* Signed 64-bit multiply */
10904 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10909 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10910 tcg_temp_free_i64(tmp64);
10913 gen_addq_lo(s, tmp64, rs);
10914 gen_addq_lo(s, tmp64, rd);
10915 } else if (op & 0x40) {
10916 /* 64-bit accumulate. */
10917 gen_addq(s, tmp64, rs, rd);
10919 gen_storeq_reg(s, rs, rd, tmp64);
10920 tcg_temp_free_i64(tmp64);
10925 case 6: case 7: case 14: case 15:
10927 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10928 /* We don't currently implement M profile FP support,
10929 * so this entire space should give a NOCP fault, with
10930 * the exception of the v8M VLLDM and VLSTM insns, which
10931 * must be NOPs in Secure state and UNDEF in Nonsecure state.
10933 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10934 (insn & 0xffa00f00) == 0xec200a00) {
10935 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10937 * We choose to UNDEF if the RAZ bits are non-zero.
10939 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10942 /* Just NOP since FP support is not implemented */
10945 /* All other insns: NOCP */
10946 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10947 default_exception_el(s));
10950 if ((insn & 0xfe000a00) == 0xfc000800
10951 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10952 /* The Thumb2 and ARM encodings are identical. */
10953 if (disas_neon_insn_3same_ext(s, insn)) {
10956 } else if ((insn & 0xff000a00) == 0xfe000800
10957 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10958 /* The Thumb2 and ARM encodings are identical. */
10959 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10962 } else if (((insn >> 24) & 3) == 3) {
10963 /* Translate into the equivalent ARM encoding. */
10964 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10965 if (disas_neon_data_insn(s, insn)) {
10968 } else if (((insn >> 8) & 0xe) == 10) {
10969 if (disas_vfp_insn(s, insn)) {
10973 if (insn & (1 << 28))
10975 if (disas_coproc_insn(s, insn)) {
10980 case 8: case 9: case 10: case 11:
10981 if (insn & (1 << 15)) {
10982 /* Branches, misc control. */
10983 if (insn & 0x5000) {
10984 /* Unconditional branch. */
10985 /* signextend(hw1[10:0]) -> offset[:12]. */
10986 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10987 /* hw2[10:0] -> offset[11:1]. */
10988 offset |= (insn & 0x7ff) << 1;
10989 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10990 offset[24:22] already have the same value because of the
10991 sign extension above. */
10992 offset ^= ((~insn) & (1 << 13)) << 10;
10993 offset ^= ((~insn) & (1 << 11)) << 11;
10995 if (insn & (1 << 14)) {
10996 /* Branch and link. */
10997 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
11001 if (insn & (1 << 12)) {
11003 gen_jmp(s, offset);
11006 offset &= ~(uint32_t)2;
11007 /* thumb2 bx, no need to check */
11008 gen_bx_im(s, offset);
11010 } else if (((insn >> 23) & 7) == 7) {
11012 if (insn & (1 << 13))
11015 if (insn & (1 << 26)) {
11016 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11019 if (!(insn & (1 << 20))) {
11020 /* Hypervisor call (v7) */
11021 int imm16 = extract32(insn, 16, 4) << 12
11022 | extract32(insn, 0, 12);
11029 /* Secure monitor call (v6+) */
11037 op = (insn >> 20) & 7;
11039 case 0: /* msr cpsr. */
11040 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11041 tmp = load_reg(s, rn);
11042 /* the constant is the mask and SYSm fields */
11043 addr = tcg_const_i32(insn & 0xfff);
11044 gen_helper_v7m_msr(cpu_env, addr, tmp);
11045 tcg_temp_free_i32(addr);
11046 tcg_temp_free_i32(tmp);
11051 case 1: /* msr spsr. */
11052 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11056 if (extract32(insn, 5, 1)) {
11058 int sysm = extract32(insn, 8, 4) |
11059 (extract32(insn, 4, 1) << 4);
11062 gen_msr_banked(s, r, sysm, rm);
11066 /* MSR (for PSRs) */
11067 tmp = load_reg(s, rn);
11069 msr_mask(s, (insn >> 8) & 0xf, op == 1),
11073 case 2: /* cps, nop-hint. */
11074 if (((insn >> 8) & 7) == 0) {
11075 gen_nop_hint(s, insn & 0xff);
11077 /* Implemented as NOP in user mode. */
11082 if (insn & (1 << 10)) {
11083 if (insn & (1 << 7))
11085 if (insn & (1 << 6))
11087 if (insn & (1 << 5))
11089 if (insn & (1 << 9))
11090 imm = CPSR_A | CPSR_I | CPSR_F;
11092 if (insn & (1 << 8)) {
11094 imm |= (insn & 0x1f);
11097 gen_set_psr_im(s, offset, 0, imm);
11100 case 3: /* Special control operations. */
11101 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
11102 !arm_dc_feature(s, ARM_FEATURE_M)) {
11105 op = (insn >> 4) & 0xf;
11107 case 2: /* clrex */
11112 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
11115 /* We need to break the TB after this insn
11116 * to execute self-modifying code correctly
11117 * and also to take any pending interrupts
11120 gen_goto_tb(s, 0, s->pc & ~1);
11127 /* Trivial implementation equivalent to bx.
11128 * This instruction doesn't exist at all for M-profile.
11130 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11133 tmp = load_reg(s, rn);
11136 case 5: /* Exception return. */
11140 if (rn != 14 || rd != 15) {
11143 tmp = load_reg(s, rn);
11144 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11145 gen_exception_return(s, tmp);
11148 if (extract32(insn, 5, 1) &&
11149 !arm_dc_feature(s, ARM_FEATURE_M)) {
11151 int sysm = extract32(insn, 16, 4) |
11152 (extract32(insn, 4, 1) << 4);
11154 gen_mrs_banked(s, 0, sysm, rd);
11158 if (extract32(insn, 16, 4) != 0xf) {
11161 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11162 extract32(insn, 0, 8) != 0) {
11167 tmp = tcg_temp_new_i32();
11168 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11169 addr = tcg_const_i32(insn & 0xff);
11170 gen_helper_v7m_mrs(tmp, cpu_env, addr);
11171 tcg_temp_free_i32(addr);
11173 gen_helper_cpsr_read(tmp, cpu_env);
11175 store_reg(s, rd, tmp);
11178 if (extract32(insn, 5, 1) &&
11179 !arm_dc_feature(s, ARM_FEATURE_M)) {
11181 int sysm = extract32(insn, 16, 4) |
11182 (extract32(insn, 4, 1) << 4);
11184 gen_mrs_banked(s, 1, sysm, rd);
11189 /* Not accessible in user mode. */
11190 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
11194 if (extract32(insn, 16, 4) != 0xf ||
11195 extract32(insn, 0, 8) != 0) {
11199 tmp = load_cpu_field(spsr);
11200 store_reg(s, rd, tmp);
11205 /* Conditional branch. */
11206 op = (insn >> 22) & 0xf;
11207 /* Generate a conditional jump to next instruction. */
11208 s->condlabel = gen_new_label();
11209 arm_gen_test_cc(op ^ 1, s->condlabel);
11212 /* offset[11:1] = insn[10:0] */
11213 offset = (insn & 0x7ff) << 1;
11214 /* offset[17:12] = insn[21:16]. */
11215 offset |= (insn & 0x003f0000) >> 4;
11216 /* offset[31:20] = insn[26]. */
11217 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11218 /* offset[18] = insn[13]. */
11219 offset |= (insn & (1 << 13)) << 5;
11220 /* offset[19] = insn[11]. */
11221 offset |= (insn & (1 << 11)) << 8;
11223 /* jump to the offset */
11224 gen_jmp(s, s->pc + offset);
11227 /* Data processing immediate. */
11228 if (insn & (1 << 25)) {
11229 if (insn & (1 << 24)) {
11230 if (insn & (1 << 20))
11232 /* Bitfield/Saturate. */
11233 op = (insn >> 21) & 7;
11235 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11237 tmp = tcg_temp_new_i32();
11238 tcg_gen_movi_i32(tmp, 0);
11240 tmp = load_reg(s, rn);
11243 case 2: /* Signed bitfield extract. */
11245 if (shift + imm > 32)
11248 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11251 case 6: /* Unsigned bitfield extract. */
11253 if (shift + imm > 32)
11256 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11259 case 3: /* Bitfield insert/clear. */
11262 imm = imm + 1 - shift;
11264 tmp2 = load_reg(s, rd);
11265 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
11266 tcg_temp_free_i32(tmp2);
11271 default: /* Saturate. */
11274 tcg_gen_sari_i32(tmp, tmp, shift);
11276 tcg_gen_shli_i32(tmp, tmp, shift);
11278 tmp2 = tcg_const_i32(imm);
11281 if ((op & 1) && shift == 0) {
11282 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11283 tcg_temp_free_i32(tmp);
11284 tcg_temp_free_i32(tmp2);
11287 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
11289 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
11293 if ((op & 1) && shift == 0) {
11294 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11295 tcg_temp_free_i32(tmp);
11296 tcg_temp_free_i32(tmp2);
11299 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
11301 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
11304 tcg_temp_free_i32(tmp2);
11307 store_reg(s, rd, tmp);
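/* Assemble the 12-bit i:imm3:imm8 immediate from insn[26],
 * insn[14:12] and insn[7:0].
 */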
11309 imm = ((insn & 0x04000000) >> 15)
11310 | ((insn & 0x7000) >> 4) | (insn & 0xff);
11311 if (insn & (1 << 22)) {
11312 /* 16-bit immediate. */
11313 imm |= (insn >> 4) & 0xf000;
11314 if (insn & (1 << 23)) {
11316 tmp = load_reg(s, rd);
11317 tcg_gen_ext16u_i32(tmp, tmp);
11318 tcg_gen_ori_i32(tmp, tmp, imm << 16);
11321 tmp = tcg_temp_new_i32();
11322 tcg_gen_movi_i32(tmp, imm);
11325 /* Add/sub 12-bit immediate. */
11327 offset = s->pc & ~(uint32_t)3;
11328 if (insn & (1 << 23))
11332 tmp = tcg_temp_new_i32();
11333 tcg_gen_movi_i32(tmp, offset);
11335 tmp = load_reg(s, rn);
11336 if (insn & (1 << 23))
11337 tcg_gen_subi_i32(tmp, tmp, imm);
11339 tcg_gen_addi_i32(tmp, tmp, imm);
11342 store_reg(s, rd, tmp);
11345 int shifter_out = 0;
11346 /* modified 12-bit immediate. */
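/* i:imm3 (insn[26], insn[14:12]) selects how the 8-bit immediate
 * is expanded.
 */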
11347 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
11348 imm = (insn & 0xff);
11351 /* Nothing to do. */
11353 case 1: /* 00XY00XY */
11356 case 2: /* XY00XY00 */
11360 case 3: /* XYXYXYXY */
11364 default: /* Rotated constant. */
11365 shift = (shift << 1) | (imm >> 7);
11367 imm = imm << (32 - shift);
11371 tmp2 = tcg_temp_new_i32();
11372 tcg_gen_movi_i32(tmp2, imm);
11373 rn = (insn >> 16) & 0xf;
11375 tmp = tcg_temp_new_i32();
11376 tcg_gen_movi_i32(tmp, 0);
11378 tmp = load_reg(s, rn);
11380 op = (insn >> 21) & 0xf;
11381 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
11382 shifter_out, tmp, tmp2))
11384 tcg_temp_free_i32(tmp2);
11385 rd = (insn >> 8) & 0xf;
11387 store_reg(s, rd, tmp);
11389 tcg_temp_free_i32(tmp);
11394 case 12: /* Load/store single data item. */
11401 if ((insn & 0x01100000) == 0x01000000) {
11402 if (disas_neon_ls_insn(s, insn)) {
11407 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11409 if (!(insn & (1 << 20))) {
11413 /* Byte or halfword load space with dest == r15 : memory hints.
11414 * Catch them early so we don't emit pointless addressing code.
11415 * This space is a mix of:
11416 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11417 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11419 * unallocated hints, which must be treated as NOPs
11420 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11421 * which is easiest for the decoding logic
11422 * Some space which must UNDEF
11424 int op1 = (insn >> 23) & 3;
11425 int op2 = (insn >> 6) & 0x3f;
11430 /* UNPREDICTABLE, unallocated hint or
11431 * PLD/PLDW/PLI (literal)
11436 return; /* PLD/PLDW/PLI or unallocated hint */
11438 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
11439 return; /* PLD/PLDW/PLI or unallocated hint */
11441 /* UNDEF space, or an UNPREDICTABLE */
11445 memidx = get_mem_index(s);
11447 addr = tcg_temp_new_i32();
11449 /* s->pc has already been incremented by 4. */
11450 imm = s->pc & 0xfffffffc;
11451 if (insn & (1 << 23))
11452 imm += insn & 0xfff;
11454 imm -= insn & 0xfff;
11455 tcg_gen_movi_i32(addr, imm);
11457 addr = load_reg(s, rn);
11458 if (insn & (1 << 23)) {
11459 /* Positive offset. */
11460 imm = insn & 0xfff;
11461 tcg_gen_addi_i32(addr, addr, imm);
11464 switch ((insn >> 8) & 0xf) {
11465 case 0x0: /* Shifted Register. */
11466 shift = (insn >> 4) & 0xf;
11468 tcg_temp_free_i32(addr);
11471 tmp = load_reg(s, rm);
11473 tcg_gen_shli_i32(tmp, tmp, shift);
11474 tcg_gen_add_i32(addr, addr, tmp);
11475 tcg_temp_free_i32(tmp);
11477 case 0xc: /* Negative offset. */
11478 tcg_gen_addi_i32(addr, addr, -imm);
11480 case 0xe: /* User privilege. */
11481 tcg_gen_addi_i32(addr, addr, imm);
11482 memidx = get_a32_user_mem_index(s);
11484 case 0x9: /* Post-decrement. */
11486 /* Fall through. */
11487 case 0xb: /* Post-increment. */
11491 case 0xd: /* Pre-decrement. */
11493 /* Fall through. */
11494 case 0xf: /* Pre-increment. */
11495 tcg_gen_addi_i32(addr, addr, imm);
11499 tcg_temp_free_i32(addr);
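/* Writeback forms cannot report a valid transfer register in the
 * syndrome, so the ISS is flagged invalid for them.
 */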
11505 issinfo = writeback ? ISSInvalid : rs;
11507 if (insn & (1 << 20)) {
11509 tmp = tcg_temp_new_i32();
11512 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
11515 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
11518 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
11521 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
11524 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
11527 tcg_temp_free_i32(tmp);
11528 tcg_temp_free_i32(addr);
11532 gen_bx_excret(s, tmp);
11534 store_reg(s, rs, tmp);
11538 tmp = load_reg(s, rs);
11541 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
11544 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
11547 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
11550 tcg_temp_free_i32(tmp);
11551 tcg_temp_free_i32(addr);
11554 tcg_temp_free_i32(tmp);
11557 tcg_gen_addi_i32(addr, addr, imm);
11559 store_reg(s, rn, addr);
11561 tcg_temp_free_i32(addr);
11570 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11571 default_exception_el(s));
11574 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
11576 uint32_t val, op, rm, rn, rd, shift, cond;
11583 switch (insn >> 12) {
11587 op = (insn >> 11) & 3;
11590 rn = (insn >> 3) & 7;
11591 tmp = load_reg(s, rn);
11592 if (insn & (1 << 10)) {
11594 tmp2 = tcg_temp_new_i32();
11595 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
11598 rm = (insn >> 6) & 7;
11599 tmp2 = load_reg(s, rm);
11601 if (insn & (1 << 9)) {
11602 if (s->condexec_mask)
11603 tcg_gen_sub_i32(tmp, tmp, tmp2);
11605 gen_sub_CC(tmp, tmp, tmp2);
11607 if (s->condexec_mask)
11608 tcg_gen_add_i32(tmp, tmp, tmp2);
11610 gen_add_CC(tmp, tmp, tmp2);
11612 tcg_temp_free_i32(tmp2);
11613 store_reg(s, rd, tmp);
11615 /* shift immediate */
11616 rm = (insn >> 3) & 7;
11617 shift = (insn >> 6) & 0x1f;
11618 tmp = load_reg(s, rm);
11619 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11620 if (!s->condexec_mask)
11622 store_reg(s, rd, tmp);
11626 /* arithmetic large immediate */
11627 op = (insn >> 11) & 3;
11628 rd = (insn >> 8) & 0x7;
11629 if (op == 0) { /* mov */
11630 tmp = tcg_temp_new_i32();
11631 tcg_gen_movi_i32(tmp, insn & 0xff);
11632 if (!s->condexec_mask)
11634 store_reg(s, rd, tmp);
11636 tmp = load_reg(s, rd);
11637 tmp2 = tcg_temp_new_i32();
11638 tcg_gen_movi_i32(tmp2, insn & 0xff);
11641 gen_sub_CC(tmp, tmp, tmp2);
11642 tcg_temp_free_i32(tmp);
11643 tcg_temp_free_i32(tmp2);
11646 if (s->condexec_mask)
11647 tcg_gen_add_i32(tmp, tmp, tmp2);
11649 gen_add_CC(tmp, tmp, tmp2);
11650 tcg_temp_free_i32(tmp2);
11651 store_reg(s, rd, tmp);
11654 if (s->condexec_mask)
11655 tcg_gen_sub_i32(tmp, tmp, tmp2);
11657 gen_sub_CC(tmp, tmp, tmp2);
11658 tcg_temp_free_i32(tmp2);
11659 store_reg(s, rd, tmp);
11665 if (insn & (1 << 11)) {
11666 rd = (insn >> 8) & 7;
11667 /* load pc-relative. Bit 1 of PC is ignored. */
11668 val = s->pc + 2 + ((insn & 0xff) * 4);
11669 val &= ~(uint32_t)2;
11670 addr = tcg_temp_new_i32();
11671 tcg_gen_movi_i32(addr, val);
11672 tmp = tcg_temp_new_i32();
11673 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11675 tcg_temp_free_i32(addr);
11676 store_reg(s, rd, tmp);
11679 if (insn & (1 << 10)) {
11680 /* 0b0100_01xx_xxxx_xxxx
11681 * - data processing extended, branch and exchange
11683 rd = (insn & 7) | ((insn >> 4) & 8);
11684 rm = (insn >> 3) & 0xf;
11685 op = (insn >> 8) & 3;
11688 tmp = load_reg(s, rd);
11689 tmp2 = load_reg(s, rm);
11690 tcg_gen_add_i32(tmp, tmp, tmp2);
11691 tcg_temp_free_i32(tmp2);
11692 store_reg(s, rd, tmp);
11695 tmp = load_reg(s, rd);
11696 tmp2 = load_reg(s, rm);
11697 gen_sub_CC(tmp, tmp, tmp2);
11698 tcg_temp_free_i32(tmp2);
11699 tcg_temp_free_i32(tmp);
11701 case 2: /* mov/cpy */
11702 tmp = load_reg(s, rm);
11703 store_reg(s, rd, tmp);
11707 /* 0b0100_0111_xxxx_xxxx
11708 * - branch [and link] exchange thumb register
11710 bool link = insn & (1 << 7);
11719 /* BXNS/BLXNS: only exists for v8M with the
11720 * security extensions, and always UNDEF if NonSecure.
11721 * We don't implement these in the user-only mode
11722 * either (in theory you can use them from Secure User
11723 * mode, but they are too tied into system emulation).
11725 if (!s->v8m_secure || IS_USER_ONLY) {
11736 tmp = load_reg(s, rm);
11738 val = (uint32_t)s->pc | 1;
11739 tmp2 = tcg_temp_new_i32();
11740 tcg_gen_movi_i32(tmp2, val);
11741 store_reg(s, 14, tmp2);
11744 /* Only BX works as exception-return, not BLX */
11745 gen_bx_excret(s, tmp);
11753 /* data processing register */
11755 rm = (insn >> 3) & 7;
11756 op = (insn >> 6) & 0xf;
11757 if (op == 2 || op == 3 || op == 4 || op == 7) {
11758 /* the shift/rotate ops want the operands backwards */
11767 if (op == 9) { /* neg */
11768 tmp = tcg_temp_new_i32();
11769 tcg_gen_movi_i32(tmp, 0);
11770 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11771 tmp = load_reg(s, rd);
11776 tmp2 = load_reg(s, rm);
11778 case 0x0: /* and */
11779 tcg_gen_and_i32(tmp, tmp, tmp2);
11780 if (!s->condexec_mask)
11783 case 0x1: /* eor */
11784 tcg_gen_xor_i32(tmp, tmp, tmp2);
11785 if (!s->condexec_mask)
11788 case 0x2: /* lsl */
11789 if (s->condexec_mask) {
11790 gen_shl(tmp2, tmp2, tmp);
11792 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11793 gen_logic_CC(tmp2);
11796 case 0x3: /* lsr */
11797 if (s->condexec_mask) {
11798 gen_shr(tmp2, tmp2, tmp);
11800 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11801 gen_logic_CC(tmp2);
11804 case 0x4: /* asr */
11805 if (s->condexec_mask) {
11806 gen_sar(tmp2, tmp2, tmp);
11808 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11809 gen_logic_CC(tmp2);
11812 case 0x5: /* adc */
11813 if (s->condexec_mask) {
11814 gen_adc(tmp, tmp2);
11816 gen_adc_CC(tmp, tmp, tmp2);
11819 case 0x6: /* sbc */
11820 if (s->condexec_mask) {
11821 gen_sub_carry(tmp, tmp, tmp2);
11823 gen_sbc_CC(tmp, tmp, tmp2);
11826 case 0x7: /* ror */
11827 if (s->condexec_mask) {
11828 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11829 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11831 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11832 gen_logic_CC(tmp2);
11835 case 0x8: /* tst */
11836 tcg_gen_and_i32(tmp, tmp, tmp2);
11840 case 0x9: /* neg */
11841 if (s->condexec_mask)
11842 tcg_gen_neg_i32(tmp, tmp2);
11844 gen_sub_CC(tmp, tmp, tmp2);
11846 case 0xa: /* cmp */
11847 gen_sub_CC(tmp, tmp, tmp2);
11850 case 0xb: /* cmn */
11851 gen_add_CC(tmp, tmp, tmp2);
11854 case 0xc: /* orr */
11855 tcg_gen_or_i32(tmp, tmp, tmp2);
11856 if (!s->condexec_mask)
11859 case 0xd: /* mul */
11860 tcg_gen_mul_i32(tmp, tmp, tmp2);
11861 if (!s->condexec_mask)
11864 case 0xe: /* bic */
11865 tcg_gen_andc_i32(tmp, tmp, tmp2);
11866 if (!s->condexec_mask)
11869 case 0xf: /* mvn */
11870 tcg_gen_not_i32(tmp2, tmp2);
11871 if (!s->condexec_mask)
11872 gen_logic_CC(tmp2);
11879 store_reg(s, rm, tmp2);
11881 tcg_temp_free_i32(tmp);
11883 store_reg(s, rd, tmp);
11884 tcg_temp_free_i32(tmp2);
11887 tcg_temp_free_i32(tmp);
11888 tcg_temp_free_i32(tmp2);
11893 /* load/store register offset. */
11895 rn = (insn >> 3) & 7;
11896 rm = (insn >> 6) & 7;
11897 op = (insn >> 9) & 7;
11898 addr = load_reg(s, rn);
11899 tmp = load_reg(s, rm);
11900 tcg_gen_add_i32(addr, addr, tmp);
11901 tcg_temp_free_i32(tmp);
11903 if (op < 3) { /* store */
11904 tmp = load_reg(s, rd);
11906 tmp = tcg_temp_new_i32();
11911 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11914 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11917 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11919 case 3: /* ldrsb */
11920 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11923 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11926 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11929 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11931 case 7: /* ldrsh */
11932 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11935 if (op >= 3) { /* load */
11936 store_reg(s, rd, tmp);
11938 tcg_temp_free_i32(tmp);
11940 tcg_temp_free_i32(addr);
11944 /* load/store word immediate offset */
11946 rn = (insn >> 3) & 7;
11947 addr = load_reg(s, rn);
11948 val = (insn >> 4) & 0x7c;
11949 tcg_gen_addi_i32(addr, addr, val);
11951 if (insn & (1 << 11)) {
11953 tmp = tcg_temp_new_i32();
11954 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11955 store_reg(s, rd, tmp);
11958 tmp = load_reg(s, rd);
11959 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11960 tcg_temp_free_i32(tmp);
11962 tcg_temp_free_i32(addr);
11966 /* load/store byte immediate offset */
11968 rn = (insn >> 3) & 7;
11969 addr = load_reg(s, rn);
11970 val = (insn >> 6) & 0x1f;
11971 tcg_gen_addi_i32(addr, addr, val);
11973 if (insn & (1 << 11)) {
11975 tmp = tcg_temp_new_i32();
11976 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11977 store_reg(s, rd, tmp);
11980 tmp = load_reg(s, rd);
11981 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11982 tcg_temp_free_i32(tmp);
11984 tcg_temp_free_i32(addr);
11988 /* load/store halfword immediate offset */
11990 rn = (insn >> 3) & 7;
11991 addr = load_reg(s, rn);
11992 val = (insn >> 5) & 0x3e;
11993 tcg_gen_addi_i32(addr, addr, val);
11995 if (insn & (1 << 11)) {
11997 tmp = tcg_temp_new_i32();
11998 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11999 store_reg(s, rd, tmp);
12002 tmp = load_reg(s, rd);
12003 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12004 tcg_temp_free_i32(tmp);
12006 tcg_temp_free_i32(addr);
12010 /* load/store from stack */
12011 rd = (insn >> 8) & 7;
12012 addr = load_reg(s, 13);
12013 val = (insn & 0xff) * 4;
12014 tcg_gen_addi_i32(addr, addr, val);
12016 if (insn & (1 << 11)) {
12018 tmp = tcg_temp_new_i32();
12019 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12020 store_reg(s, rd, tmp);
12023 tmp = load_reg(s, rd);
12024 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12025 tcg_temp_free_i32(tmp);
12027 tcg_temp_free_i32(addr);
12031 /* add to high reg */
12032 rd = (insn >> 8) & 7;
12033 if (insn & (1 << 11)) {
12035 tmp = load_reg(s, 13);
12037 /* PC. bit 1 is ignored. */
12038 tmp = tcg_temp_new_i32();
12039 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
12041 val = (insn & 0xff) * 4;
12042 tcg_gen_addi_i32(tmp, tmp, val);
12043 store_reg(s, rd, tmp);
12048 op = (insn >> 8) & 0xf;
12051 /* adjust stack pointer */
12052 tmp = load_reg(s, 13);
12053 val = (insn & 0x7f) * 4;
12054 if (insn & (1 << 7))
12055 val = -(int32_t)val;
12056 tcg_gen_addi_i32(tmp, tmp, val);
12057 store_reg(s, 13, tmp);
12060 case 2: /* sign/zero extend. */
12063 rm = (insn >> 3) & 7;
12064 tmp = load_reg(s, rm);
12065 switch ((insn >> 6) & 3) {
12066 case 0: gen_sxth(tmp); break;
12067 case 1: gen_sxtb(tmp); break;
12068 case 2: gen_uxth(tmp); break;
12069 case 3: gen_uxtb(tmp); break;
12071 store_reg(s, rd, tmp);
12073 case 4: case 5: case 0xc: case 0xd:
12075 addr = load_reg(s, 13);
12076 if (insn & (1 << 8))
12080 for (i = 0; i < 8; i++) {
12081 if (insn & (1 << i))
12084 if ((insn & (1 << 11)) == 0) {
12085 tcg_gen_addi_i32(addr, addr, -offset);
12087 for (i = 0; i < 8; i++) {
12088 if (insn & (1 << i)) {
12089 if (insn & (1 << 11)) {
12091 tmp = tcg_temp_new_i32();
12092 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12093 store_reg(s, i, tmp);
12096 tmp = load_reg(s, i);
12097 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12098 tcg_temp_free_i32(tmp);
12100 /* advance to the next address. */
12101 tcg_gen_addi_i32(addr, addr, 4);
12105 if (insn & (1 << 8)) {
12106 if (insn & (1 << 11)) {
12108 tmp = tcg_temp_new_i32();
12109 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12110 /* don't set the pc until the rest of the instruction
12114 tmp = load_reg(s, 14);
12115 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12116 tcg_temp_free_i32(tmp);
12118 tcg_gen_addi_i32(addr, addr, 4);
12120 if ((insn & (1 << 11)) == 0) {
12121 tcg_gen_addi_i32(addr, addr, -offset);
12123 /* write back the new stack pointer */
12124 store_reg(s, 13, addr);
12125 /* set the new PC value */
12126 if ((insn & 0x0900) == 0x0900) {
12127 store_reg_from_load(s, 15, tmp);
12131 case 1: case 3: case 9: case 11: /* cbz/cbnz */
12133 tmp = load_reg(s, rm);
12134 s->condlabel = gen_new_label();
12136 if (insn & (1 << 11))
12137 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
12139 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
12140 tcg_temp_free_i32(tmp);
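/* Branch offset is i:imm5:'0': insn[9] supplies bit 6, insn[7:3] bits 5:1. */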
12141 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
12142 val = (uint32_t)s->pc + 2;
12147 case 15: /* IT, nop-hint. */
12148 if ((insn & 0xf) == 0) {
12149 gen_nop_hint(s, (insn >> 4) & 0xf);
12153 s->condexec_cond = (insn >> 4) & 0xe;
12154 s->condexec_mask = insn & 0x1f;
12155 /* No actual code generated for this insn, just setup state. */
12158 case 0xe: /* bkpt */
12160 int imm8 = extract32(insn, 0, 8);
12162 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
12166 case 0xa: /* rev, and hlt */
12168 int op1 = extract32(insn, 6, 2);
12172 int imm6 = extract32(insn, 0, 6);
12178 /* Otherwise this is rev */
12180 rn = (insn >> 3) & 0x7;
12182 tmp = load_reg(s, rn);
12184 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
12185 case 1: gen_rev16(tmp); break;
12186 case 3: gen_revsh(tmp); break;
12188 g_assert_not_reached();
12190 store_reg(s, rd, tmp);
12195 switch ((insn >> 5) & 7) {
12199 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12200 gen_helper_setend(cpu_env);
12201 s->base.is_jmp = DISAS_UPDATE;
12210 if (arm_dc_feature(s, ARM_FEATURE_M)) {
12211 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
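/* v7M SYSm encoding: 19 is FAULTMASK, 16 is PRIMASK. */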
12214 addr = tcg_const_i32(19);
12215 gen_helper_v7m_msr(cpu_env, addr, tmp);
12216 tcg_temp_free_i32(addr);
12220 addr = tcg_const_i32(16);
12221 gen_helper_v7m_msr(cpu_env, addr, tmp);
12222 tcg_temp_free_i32(addr);
12224 tcg_temp_free_i32(tmp);
12227 if (insn & (1 << 4)) {
12228 shift = CPSR_A | CPSR_I | CPSR_F;
12232 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
12247 /* load/store multiple */
12248 TCGv_i32 loaded_var = NULL;
12249 rn = (insn >> 8) & 0x7;
12250 addr = load_reg(s, rn);
12251 for (i = 0; i < 8; i++) {
12252 if (insn & (1 << i)) {
12253 if (insn & (1 << 11)) {
12255 tmp = tcg_temp_new_i32();
12256 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12260 store_reg(s, i, tmp);
12264 tmp = load_reg(s, i);
12265 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12266 tcg_temp_free_i32(tmp);
12268 /* advance to the next address */
12269 tcg_gen_addi_i32(addr, addr, 4);
12272 if ((insn & (1 << rn)) == 0) {
12273 /* base reg not in list: base register writeback */
12274 store_reg(s, rn, addr);
12276 /* base reg in list: if load, complete it now */
12277 if (insn & (1 << 11)) {
12278 store_reg(s, rn, loaded_var);
12280 tcg_temp_free_i32(addr);
12285 /* conditional branch or swi */
12286 cond = (insn >> 8) & 0xf;
12292 gen_set_pc_im(s, s->pc);
12293 s->svc_imm = extract32(insn, 0, 8);
12294 s->base.is_jmp = DISAS_SWI;
12297 /* generate a conditional jump to next instruction */
12298 s->condlabel = gen_new_label();
12299 arm_gen_test_cc(cond ^ 1, s->condlabel);
12302 /* jump to the offset */
12303 val = (uint32_t)s->pc + 2;
12304 offset = ((int32_t)insn << 24) >> 24;
12305 val += offset << 1;
12310 if (insn & (1 << 11)) {
12311 /* thumb_insn_is_16bit() ensures we can't get here for
12312 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12313 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12315 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12317 offset = ((insn & 0x7ff) << 1);
12318 tmp = load_reg(s, 14);
12319 tcg_gen_addi_i32(tmp, tmp, offset);
12320 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12322 tmp2 = tcg_temp_new_i32();
12323 tcg_gen_movi_i32(tmp2, s->pc | 1);
12324 store_reg(s, 14, tmp2);
12328 /* unconditional branch */
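/* Target is the PC of this insn + 4 plus signextend(imm11)*2; s->pc
 * already points at the next halfword, hence the extra +2 below.
 */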
12329 val = (uint32_t)s->pc;
12330 offset = ((int32_t)insn << 21) >> 21;
12331 val += (offset << 1) + 2;
12336 /* thumb_insn_is_16bit() ensures we can't get here for
12337 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12339 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12341 if (insn & (1 << 11)) {
12342 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12343 offset = ((insn & 0x7ff) << 1) | 1;
12344 tmp = load_reg(s, 14);
12345 tcg_gen_addi_i32(tmp, tmp, offset);
12347 tmp2 = tcg_temp_new_i32();
12348 tcg_gen_movi_i32(tmp2, s->pc | 1);
12349 store_reg(s, 14, tmp2);
12352 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
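/* The prefix stores PC (this insn + 4) + signextend(imm11) << 12 in LR;
 * the BL/BLX suffix halfword then adds its own offset and branches.
 */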
12353 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
12355 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
12362 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
12363 default_exception_el(s));
12366 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
12368 /* Return true if the insn at dc->pc might cross a page boundary.
12369 * (False positives are OK, false negatives are not.)
12370 * We know this is a Thumb insn, and our caller ensures we are
12371 * only called if dc->pc is less than 4 bytes from the page
12372 * boundary, so we cross the page if the first 16 bits indicate
12373 * that this is a 32 bit insn.
12375 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
12377 return !thumb_insn_is_16bit(s, insn);
12380 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
12382 DisasContext *dc = container_of(dcbase, DisasContext, base);
12383 CPUARMState *env = cs->env_ptr;
12384 ARMCPU *cpu = arm_env_get_cpu(env);
12386 dc->pc = dc->base.pc_first;
12390 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
12391 * there is no secure EL1, so we route exceptions to EL3.
12393 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
12394 !arm_el_is_aa64(env, 3);
12395 dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
12396 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
12397 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
12398 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
12399 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
12400 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
12401 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
12402 #if !defined(CONFIG_USER_ONLY)
12403 dc->user = (dc->current_el == 0);
12405 dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
12406 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
12407 dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
12408 dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
12409 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
12410 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
12411 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
12412 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12413 regime_is_secure(env, dc->mmu_idx);
12414 dc->cp_regs = cpu->cp_regs;
12415 dc->features = env->features;
12417 /* Single step state. The code-generation logic here is:
12419 * generate code with no special handling for single-stepping (except
12420 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
12421 * this happens anyway because those changes are all system register or
12423 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
12424 * emit code for one insn
12425 * emit code to clear PSTATE.SS
12426 * emit code to generate software step exception for completed step
12427 * end TB (as usual for having generated an exception)
12428 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
12429 * emit code to generate a software step exception
12432 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
12433 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
12434 dc->is_ldex = false;
12435 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
12437 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
12439 /* If architectural single step active, limit to 1. */
12440 if (is_singlestepping(dc)) {
12441 dc->base.max_insns = 1;
12444 /* ARM is a fixed-length ISA. Bound the number of insns to execute
12445 to those left on the page. */
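/* -(pc_first | TARGET_PAGE_MASK) is the number of bytes left on this page. */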
12447 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
12448 dc->base.max_insns = MIN(dc->base.max_insns, bound);
12451 cpu_F0s = tcg_temp_new_i32();
12452 cpu_F1s = tcg_temp_new_i32();
12453 cpu_F0d = tcg_temp_new_i64();
12454 cpu_F1d = tcg_temp_new_i64();
12457 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
12458 cpu_M0 = tcg_temp_new_i64();
12461 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
12463 DisasContext *dc = container_of(dcbase, DisasContext, base);
12465 /* A note on handling of the condexec (IT) bits:
12467 * We want to avoid the overhead of having to write the updated condexec
12468 * bits back to the CPUARMState for every instruction in an IT block. So:
12469 * (1) if the condexec bits are not already zero then we write
12470 * zero back into the CPUARMState now. This avoids complications trying
12471 * to do it at the end of the block. (For example if we don't do this
12472 * it's hard to identify whether we can safely skip writing condexec
12473 * at the end of the TB, which we definitely want to do for the case
12474 * where a TB doesn't do anything with the IT state at all.)
12475 * (2) if we are going to leave the TB then we call gen_set_condexec()
12476 * which will write the correct value into CPUARMState if zero is wrong.
12477 * This is done both for leaving the TB at the end, and for leaving
12478 * it because of an exception we know will happen, which is done in
12479 * gen_exception_insn(). The latter is necessary because we need to
12480 * leave the TB with the PC/IT state just prior to execution of the
12481 * instruction which caused the exception.
12482 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12483 * then the CPUARMState will be wrong and we need to reset it.
12484 * This is handled in the same way as restoration of the
12485 * PC in these situations; we save the value of the condexec bits
12486 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12487 * then uses this to restore them after an exception.
12489 * Note that there are no instructions which can read the condexec
12490 * bits, and none which can write non-static values to them, so
12491 * we don't need to care about whether CPUARMState is correct in the
12495 /* Reset the conditional execution bits immediately. This avoids
12496 complications trying to do it at the end of the block. */
12497 if (dc->condexec_mask || dc->condexec_cond) {
12498 TCGv_i32 tmp = tcg_temp_new_i32();
12499 tcg_gen_movi_i32(tmp, 0);
12500 store_cpu_field(tmp, condexec_bits);
12502 tcg_clear_temp_count();
12505 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12507 DisasContext *dc = container_of(dcbase, DisasContext, base);
12509 tcg_gen_insn_start(dc->pc,
12510 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12512 dc->insn_start = tcg_last_op();
12515 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12516 const CPUBreakpoint *bp)
12518 DisasContext *dc = container_of(dcbase, DisasContext, base);
12520 if (bp->flags & BP_CPU) {
12521 gen_set_condexec(dc);
12522 gen_set_pc_im(dc, dc->pc);
12523 gen_helper_check_breakpoints(cpu_env);
12524 /* End the TB early; it's likely not going to be executed */
12525 dc->base.is_jmp = DISAS_TOO_MANY;
12527 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12528 /* The address covered by the breakpoint must be
12529 included in [tb->pc, tb->pc + tb->size) in order
12530 for it to be properly cleared -- thus we
12531 increment the PC here so that the logic setting
12532 tb->size below does the right thing. */
12533 /* TODO: Advance PC by correct instruction length to
12534 * avoid disassembler error messages */
12536 dc->base.is_jmp = DISAS_NORETURN;
12542 static bool arm_pre_translate_insn(DisasContext *dc)
12544 #ifdef CONFIG_USER_ONLY
12545 /* Intercept jump to the magic kernel page. */
12546 if (dc->pc >= 0xffff0000) {
12547 /* We always get here via a jump, so know we are not in a
12548 conditional execution block. */
12549 gen_exception_internal(EXCP_KERNEL_TRAP);
12550 dc->base.is_jmp = DISAS_NORETURN;
12555 if (dc->ss_active && !dc->pstate_ss) {
12556 /* Singlestep state is Active-pending.
12557 * If we're in this state at the start of a TB then either
12558 * a) we just took an exception to an EL which is being debugged
12559 * and this is the first insn in the exception handler
12560 * b) debug exceptions were masked and we just unmasked them
12561 * without changing EL (eg by clearing PSTATE.D)
12562 * In either case we're going to take a swstep exception in the
12563 * "did not step an insn" case, and so the syndrome ISV and EX
12564 * bits should be zero.
12566 assert(dc->base.num_insns == 1);
12567 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12568 default_exception_el(dc));
12569 dc->base.is_jmp = DISAS_NORETURN;
12576 static void arm_post_translate_insn(DisasContext *dc)
12578 if (dc->condjmp && !dc->base.is_jmp) {
12579 gen_set_label(dc->condlabel);
12582 dc->base.pc_next = dc->pc;
12583 translator_loop_temp_check(&dc->base);
12586 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12588 DisasContext *dc = container_of(dcbase, DisasContext, base);
12589 CPUARMState *env = cpu->env_ptr;
12592 if (arm_pre_translate_insn(dc)) {
12596 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
12599 disas_arm_insn(dc, insn);
12601 arm_post_translate_insn(dc);
12603 /* ARM is a fixed-length ISA. We performed the cross-page check
12604 in init_disas_context by adjusting max_insns. */
12607 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12609 /* Return true if this Thumb insn is always unconditional,
12610 * even inside an IT block. This is true of only a very few
12611 * instructions: BKPT, HLT, and SG.
12613 * A larger class of instructions are UNPREDICTABLE if used
12614 * inside an IT block; we do not need to detect those here, because
12615 * what we do by default (perform the cc check and update the IT
12616 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12617 * choice for those situations.
12619 * insn is either a 16-bit or a 32-bit instruction; the two are
12620 * distinguishable because for the 16-bit case the top 16 bits
12621 * are zeroes, and that isn't a valid 32-bit encoding.
12623 if ((insn & 0xffffff00) == 0xbe00) {
12628 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12629 !arm_dc_feature(s, ARM_FEATURE_M)) {
12630 /* HLT: v8A only. This is unconditional even when it is going to
12631 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12632 * For v7 cores this was a plain old undefined encoding and so
12633 * honours its cc check. (We might be using the encoding as
12634 * a semihosting trap, but we don't change the cc check behaviour
12635 * on that account, because a debugger connected to a real v7A
12636 * core and emulating semihosting traps by catching the UNDEF
12637 * exception would also only see cases where the cc check passed.
12638 * No guest code should be trying to do a HLT semihosting trap
12639 * in an IT block anyway.
12644 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12645 arm_dc_feature(s, ARM_FEATURE_M)) {
12653 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12655 DisasContext *dc = container_of(dcbase, DisasContext, base);
12656 CPUARMState *env = cpu->env_ptr;
12660 if (arm_pre_translate_insn(dc)) {
12664 insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
12665 is_16bit = thumb_insn_is_16bit(dc, insn);
12668 uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
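/* 32-bit Thumb insns are held with the first halfword in the top 16 bits. */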
12670 insn = insn << 16 | insn2;
12675 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
12676 uint32_t cond = dc->condexec_cond;
12678 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
12679 dc->condlabel = gen_new_label();
12680 arm_gen_test_cc(cond ^ 1, dc->condlabel);
12686 disas_thumb_insn(dc, insn);
12688 disas_thumb2_insn(dc, insn);
12691 /* Advance the Thumb condexec condition. */
12692 if (dc->condexec_mask) {
12693 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12694 ((dc->condexec_mask >> 4) & 1));
12695 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12696 if (dc->condexec_mask == 0) {
12697 dc->condexec_cond = 0;
12701 arm_post_translate_insn(dc);
12703 /* Thumb is a variable-length ISA. Stop translation when the next insn
12704 * will touch a new page. This ensures that prefetch aborts occur at
12707 * We want to stop the TB if the next insn starts in a new page,
12708 * or if it spans between this page and the next. This means that
12709 * if we're looking at the last halfword in the page we need to
12710 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12711 * or a 32-bit Thumb insn (which won't).
12712 * This is to avoid generating a silly TB with a single 16-bit insn
12713 * in it at the end of this page (which would execute correctly
12714 * but isn't very efficient).
12716 if (dc->base.is_jmp == DISAS_NEXT
12717 && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
12718 || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
12719 && insn_crosses_page(env, dc)))) {
12720 dc->base.is_jmp = DISAS_TOO_MANY;
12724 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
12726 DisasContext *dc = container_of(dcbase, DisasContext, base);
12728 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
12729 /* FIXME: This can theoretically happen with self-modifying code. */
12730 cpu_abort(cpu, "IO on conditional branch instruction");
12733 /* At this stage dc->condjmp will only be set when the skipped
12734 instruction was a conditional branch or trap, and the PC has
12735 already been written. */
12736 gen_set_condexec(dc);
12737 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
12738 /* Exception return branches need some special case code at the
12739 * end of the TB, which is complex enough that it has to
12740 * handle the single-step vs not and the condition-failed
12741 * insn codepath itself.
12743 gen_bx_excret_final_code(dc);
12744 } else if (unlikely(is_singlestepping(dc))) {
12745 /* Unconditional and "condition passed" instruction codepath. */
12746 switch (dc->base.is_jmp) {
12748 gen_ss_advance(dc);
12749 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12750 default_exception_el(dc));
12753 gen_ss_advance(dc);
12754 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12757 gen_ss_advance(dc);
12758 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12761 case DISAS_TOO_MANY:
12763 gen_set_pc_im(dc, dc->pc);
12766 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12767 gen_singlestep_exception(dc);
12769 case DISAS_NORETURN:
12773 /* While branches must always occur at the end of an IT block,
12774 there are a few other things that can cause us to terminate
12775 the TB in the middle of an IT block:
12776 - Exception generating instructions (bkpt, swi, undefined).
12778 - Hardware watchpoints.
12779 Hardware breakpoints have already been handled and skip this code.
12781 switch (dc->base.is_jmp) {
12783 case DISAS_TOO_MANY:
12784 gen_goto_tb(dc, 1, dc->pc);
12790 gen_set_pc_im(dc, dc->pc);
12793 /* indicate that the hash table must be used to find the next TB */
12794 tcg_gen_exit_tb(NULL, 0);
12796 case DISAS_NORETURN:
12797 /* nothing more to generate */
12801 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
12802 !(dc->insn & (1U << 31))) ? 2 : 4);
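/* dc->insn has its top halfword clear for a 16-bit Thumb insn, so the
 * length passed to the WFI helper is 2 there and 4 otherwise.
 */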
12804 gen_helper_wfi(cpu_env, tmp);
12805 tcg_temp_free_i32(tmp);
12806 /* The helper doesn't necessarily throw an exception, but we
12807 * must go back to the main loop to check for interrupts anyway.
12809 tcg_gen_exit_tb(NULL, 0);
12813 gen_helper_wfe(cpu_env);
12816 gen_helper_yield(cpu_env);
12819 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12820 default_exception_el(dc));
12823 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12826 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12832 /* "Condition failed" instruction codepath for the branch/trap insn */
12833 gen_set_label(dc->condlabel);
12834 gen_set_condexec(dc);
12835 if (unlikely(is_singlestepping(dc))) {
12836 gen_set_pc_im(dc, dc->pc);
12837 gen_singlestep_exception(dc);
12839 gen_goto_tb(dc, 1, dc->pc);
12843 /* Functions above can change dc->pc, so re-align dc->base.pc_next */
12844 dc->base.pc_next = dc->pc;
12847 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12849 DisasContext *dc = container_of(dcbase, DisasContext, base);
12851 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
12852 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
12855 static const TranslatorOps arm_translator_ops = {
12856 .init_disas_context = arm_tr_init_disas_context,
12857 .tb_start = arm_tr_tb_start,
12858 .insn_start = arm_tr_insn_start,
12859 .breakpoint_check = arm_tr_breakpoint_check,
12860 .translate_insn = arm_tr_translate_insn,
12861 .tb_stop = arm_tr_tb_stop,
12862 .disas_log = arm_tr_disas_log,
12865 static const TranslatorOps thumb_translator_ops = {
12866 .init_disas_context = arm_tr_init_disas_context,
12867 .tb_start = arm_tr_tb_start,
12868 .insn_start = arm_tr_insn_start,
12869 .breakpoint_check = arm_tr_breakpoint_check,
12870 .translate_insn = thumb_tr_translate_insn,
12871 .tb_stop = arm_tr_tb_stop,
12872 .disas_log = arm_tr_disas_log,
12875 /* generate intermediate code for basic block 'tb'. */
12876 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
12879 const TranslatorOps *ops = &arm_translator_ops;
12881 if (ARM_TBFLAG_THUMB(tb->flags)) {
12882 ops = &thumb_translator_ops;
12884 #ifdef TARGET_AARCH64
12885 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
12886 ops = &aarch64_translator_ops;
12890 translator_loop(ops, &dc.base, cpu, tb);
12893 static const char *cpu_mode_names[16] = {
12894 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12895 "???", "???", "hyp", "und", "???", "???", "???", "sys"
12898 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12901 ARMCPU *cpu = ARM_CPU(cs);
12902 CPUARMState *env = &cpu->env;
12906 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12910 for (i = 0; i < 16; i++) {
12911 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
12913 cpu_fprintf(f, "\n");
12915 cpu_fprintf(f, " ");
12918 if (arm_feature(env, ARM_FEATURE_M)) {
12919 uint32_t xpsr = xpsr_read(env);
12921 const char *ns_status = "";
12923 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12924 ns_status = env->v7m.secure ? "S " : "NS ";
12927 if (xpsr & XPSR_EXCP) {
12930 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
12931 mode = "unpriv-thread";
12933 mode = "priv-thread";
12937 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
12939 xpsr & XPSR_N ? 'N' : '-',
12940 xpsr & XPSR_Z ? 'Z' : '-',
12941 xpsr & XPSR_C ? 'C' : '-',
12942 xpsr & XPSR_V ? 'V' : '-',
12943 xpsr & XPSR_T ? 'T' : 'A',
12947 uint32_t psr = cpsr_read(env);
12948 const char *ns_status = "";
12950 if (arm_feature(env, ARM_FEATURE_EL3) &&
12951 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12952 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12955 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12957 psr & CPSR_N ? 'N' : '-',
12958 psr & CPSR_Z ? 'Z' : '-',
12959 psr & CPSR_C ? 'C' : '-',
12960 psr & CPSR_V ? 'V' : '-',
12961 psr & CPSR_T ? 'T' : 'A',
12963 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
12966 if (flags & CPU_DUMP_FPU) {
12967 int numvfpregs = 0;
12968 if (arm_feature(env, ARM_FEATURE_VFP)) {
12971 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12974 for (i = 0; i < numvfpregs; i++) {
12975 uint64_t v = *aa32_vfp_dreg(env, i);
12976 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12977 i * 2, (uint32_t)v,
12978 i * 2 + 1, (uint32_t)(v >> 32),
12981 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
12985 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12986 target_ulong *data)
12990 env->condexec_bits = 0;
12991 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12993 env->regs[15] = data[0];
12994 env->condexec_bits = data[1];
12995 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;